repo_name
stringlengths
13
51
branch_name
stringclasses
4 values
import_graph
stringlengths
40
15.7k
directory_tree
stringlengths
125
5.52k
file_1_name
stringlengths
5
79
file_1_content
stringlengths
27
58.8k
file_2_name
stringlengths
6
89
file_2_content
stringlengths
16
33.4k
file_3_name
stringlengths
6
79
file_3_content
stringlengths
60
29.4k
file_4_name
stringlengths
6
85
file_4_content
stringlengths
48
22.1k
file_5_name
stringlengths
6
86
file_5_content
stringlengths
38
27.8k
file_6_name
stringlengths
7
103
file_6_content
stringlengths
39
37.3k
file_7_name
stringlengths
7
99
file_7_content
stringlengths
66
41.3k
file_8_name
stringlengths
8
98
file_8_content
stringlengths
117
51.9k
file_9_name
stringlengths
8
96
file_9_content
stringlengths
75
27.4k
file_10_name
stringlengths
9
95
file_10_content
stringlengths
62
18.5k
file_11_name
stringlengths
8
93
file_11_content
stringlengths
25
14.1k
file_12_name
stringlengths
7
94
file_12_content
stringlengths
118
13.2k
file_13_name
stringlengths
8
98
file_13_content
stringlengths
35
29.6k
file_14_name
stringlengths
9
88
file_14_content
stringlengths
37
16.6k
file_15_name
stringlengths
10
100
file_15_content
stringlengths
90
31.5k
file_16_name
stringlengths
9
99
file_16_content
stringlengths
384
20.1k
file_17_name
stringlengths
14
90
file_17_content
stringlengths
202
14.2k
file_18_name
stringlengths
9
97
file_18_content
stringlengths
107
34.6k
file_19_name
stringlengths
9
97
file_19_content
stringlengths
146
26.4k
file_20_name
stringlengths
10
100
file_20_content
stringlengths
474
21.4k
file_21_name
stringclasses
19 values
file_21_content
stringclasses
19 values
file_22_name
stringclasses
16 values
file_22_content
stringclasses
16 values
file_23_name
stringclasses
14 values
file_23_content
stringclasses
14 values
file_24_name
stringclasses
10 values
file_24_content
stringclasses
10 values
file_25_name
stringclasses
9 values
file_25_content
stringclasses
9 values
file_26_name
stringclasses
9 values
file_26_content
stringclasses
9 values
file_27_name
stringclasses
7 values
file_27_content
stringclasses
7 values
file_28_name
stringclasses
7 values
file_28_content
stringclasses
7 values
file_29_name
stringclasses
7 values
file_29_content
stringclasses
7 values
file_30_name
stringclasses
6 values
file_30_content
stringclasses
6 values
file_31_name
stringclasses
6 values
file_31_content
stringclasses
6 values
file_32_name
stringclasses
6 values
file_32_content
stringclasses
6 values
file_33_name
stringclasses
6 values
file_33_content
stringclasses
6 values
file_34_name
stringclasses
5 values
file_34_content
stringclasses
5 values
file_35_name
stringclasses
5 values
file_35_content
stringclasses
5 values
file_36_name
stringclasses
4 values
file_36_content
stringclasses
4 values
file_37_name
stringclasses
4 values
file_37_content
stringclasses
4 values
file_38_name
stringclasses
4 values
file_38_content
stringclasses
4 values
file_39_name
stringclasses
4 values
file_39_content
stringclasses
4 values
file_40_name
stringclasses
4 values
file_40_content
stringclasses
4 values
file_41_name
stringclasses
4 values
file_41_content
stringclasses
4 values
file_42_name
stringclasses
4 values
file_42_content
stringclasses
4 values
file_43_name
stringclasses
4 values
file_43_content
stringclasses
4 values
file_44_name
stringclasses
4 values
file_44_content
stringclasses
4 values
file_45_name
stringclasses
4 values
file_45_content
stringclasses
4 values
file_46_name
stringclasses
4 values
file_46_content
stringclasses
4 values
file_47_name
stringclasses
4 values
file_47_content
stringclasses
4 values
file_48_name
stringclasses
4 values
file_48_content
stringclasses
4 values
file_49_name
stringclasses
4 values
file_49_content
stringclasses
4 values
file_50_name
stringclasses
3 values
file_50_content
stringclasses
3 values
file_51_name
stringclasses
3 values
file_51_content
stringclasses
3 values
file_52_name
stringclasses
3 values
file_52_content
stringclasses
3 values
file_53_name
stringclasses
3 values
file_53_content
stringclasses
3 values
file_54_name
stringclasses
3 values
file_54_content
stringclasses
3 values
file_55_name
stringclasses
3 values
file_55_content
stringclasses
3 values
file_56_name
stringclasses
3 values
file_56_content
stringclasses
3 values
file_57_name
stringclasses
3 values
file_57_content
stringclasses
3 values
file_58_name
stringclasses
1 value
file_58_content
stringclasses
1 value
file_59_name
stringclasses
1 value
file_59_content
stringclasses
1 value
file_60_name
stringclasses
1 value
file_60_content
stringclasses
1 value
file_61_name
stringclasses
1 value
file_61_content
stringclasses
1 value
file_62_name
stringclasses
1 value
file_62_content
stringclasses
1 value
file_63_name
stringclasses
1 value
file_63_content
stringclasses
1 value
file_64_name
stringclasses
1 value
file_64_content
stringclasses
1 value
file_65_name
stringclasses
1 value
file_65_content
stringclasses
1 value
file_66_name
stringclasses
1 value
file_66_content
stringclasses
1 value
file_67_name
stringclasses
1 value
file_67_content
stringclasses
1 value
file_68_name
stringclasses
1 value
file_68_content
stringclasses
1 value
file_69_name
stringclasses
1 value
file_69_content
stringclasses
1 value
file_70_name
stringclasses
1 value
file_70_content
stringclasses
1 value
file_71_name
stringclasses
1 value
file_71_content
stringclasses
1 value
file_72_name
stringclasses
1 value
file_72_content
stringclasses
1 value
file_73_name
stringclasses
1 value
file_73_content
stringclasses
1 value
file_74_name
stringclasses
1 value
file_74_content
stringclasses
1 value
file_75_name
stringclasses
1 value
file_75_content
stringclasses
1 value
file_76_name
stringclasses
1 value
file_76_content
stringclasses
1 value
file_77_name
stringclasses
1 value
file_77_content
stringclasses
1 value
file_78_name
stringclasses
1 value
file_78_content
stringclasses
1 value
file_79_name
stringclasses
1 value
file_79_content
stringclasses
1 value
file_80_name
stringclasses
1 value
file_80_content
stringclasses
1 value
file_81_name
stringclasses
1 value
file_81_content
stringclasses
1 value
file_82_name
stringclasses
1 value
file_82_content
stringclasses
1 value
file_83_name
stringclasses
1 value
file_83_content
stringclasses
1 value
file_84_name
stringclasses
1 value
file_84_content
stringclasses
1 value
file_85_name
stringclasses
1 value
file_85_content
stringclasses
1 value
file_86_name
stringclasses
1 value
file_86_content
stringclasses
1 value
file_87_name
stringclasses
1 value
file_87_content
stringclasses
1 value
file_88_name
stringclasses
1 value
file_88_content
stringclasses
1 value
file_89_name
stringclasses
1 value
file_89_content
stringclasses
1 value
file_90_name
stringclasses
1 value
file_90_content
stringclasses
1 value
file_91_name
stringclasses
1 value
file_91_content
stringclasses
1 value
file_92_name
stringclasses
1 value
file_92_content
stringclasses
1 value
file_93_name
stringclasses
1 value
file_93_content
stringclasses
1 value
file_94_name
stringclasses
1 value
file_94_content
stringclasses
1 value
file_95_name
stringclasses
1 value
file_95_content
stringclasses
1 value
file_96_name
stringclasses
1 value
file_96_content
stringclasses
1 value
file_97_name
stringclasses
1 value
file_97_content
stringclasses
1 value
shuishen112/pairwise-rnn
refs/heads/master
{"/run.py": ["/data_helper.py", "/config.py"], "/test.py": ["/data_helper.py", "/config.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"]}
└── ├── config.py ├── data_helper.py ├── main.py ├── models │ ├── QA_CNN_pairwise.py │ ├── __init__.py │ └── my │ └── nn.py ├── run.py └── test.py
/config.py
class Singleton(object): __instance=None def __init__(self): pass def getInstance(self): if Singleton.__instance is None: # Singleton.__instance=object.__new__(cls,*args,**kwd) Singleton.__instance=self.get_test_flag() print("build FLAGS over") return Singleton.__instance def get_test_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "cnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_rnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") # flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('data','trec','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_cnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "cnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding 
trainable? (default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_qcnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "qcnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','mean','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_8008_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS if __name__=="__main__": args=Singleton().get_test_flag() for attr, value in sorted(args.__flags.items()): print(("{}={}".format(attr.upper(), value)))
/data_helper.py
#-*- coding:utf-8 -*- import os import numpy as np import tensorflow as tf import string from collections import Counter import pandas as pd from tqdm import tqdm import random from functools import wraps import time import pickle def log_time_delta(func): @wraps(func) def _deco(*args, **kwargs): start = time.time() ret = func(*args, **kwargs) end = time.time() delta = end - start print( "%s runed %.2f seconds"% (func.__name__,delta)) return ret return _deco import tqdm from nltk.corpus import stopwords OVERLAP = 237 class Alphabet(dict): def __init__(self, start_feature_id = 1): self.fid = start_feature_id def add(self, item): idx = self.get(item, None) if idx is None: idx = self.fid self[item] = idx # self[idx] = item self.fid += 1 return idx def dump(self, fname): with open(fname, "w") as out: for k in sorted(self.keys()): out.write("{}\t{}\n".format(k, self[k])) def cut(sentence): tokens = sentence.lower().split() # tokens = [w for w in tokens if w not in stopwords.words('english')] return tokens @log_time_delta def load(dataset, filter = False): data_dir = "data/" + dataset datas = [] for data_name in ['train.txt','test.txt','dev.txt']: data_file = os.path.join(data_dir,data_name) data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0') # data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0') if filter == True: datas.append(removeUnanswerdQuestion(data)) else: datas.append(data) # sub_file = os.path.join(data_dir,'submit.txt') # submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3) # datas.append(submit) return tuple(datas) @log_time_delta def removeUnanswerdQuestion(df): counter= df.groupby("question").apply(lambda group: sum(group["flag"])) questions_have_correct=counter[counter>0].index counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0)) questions_have_uncorrect=counter[counter>0].index 
counter=df.groupby("question").apply(lambda group: len(group["flag"])) questions_multi=counter[counter>1].index return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index() @log_time_delta def get_alphabet(corpuses=None,dataset=""): pkl_name="temp/"+dataset+".alphabet.pkl" if os.path.exists(pkl_name): return pickle.load(open(pkl_name,"rb")) alphabet = Alphabet(start_feature_id = 0) alphabet.add('[UNK]') alphabet.add('END') count = 0 for corpus in corpuses: for texts in [corpus["question"].unique(),corpus["answer"]]: for sentence in texts: tokens = cut(sentence) for token in set(tokens): alphabet.add(token) print("alphabet size %d" % len(alphabet.keys()) ) if not os.path.exists("temp"): os.mkdir("temp") pickle.dump( alphabet,open(pkl_name,"wb")) return alphabet @log_time_delta def getSubVectorsFromDict(vectors,vocab,dim = 300): embedding = np.zeros((len(vocab),dim)) count = 1 for word in vocab: if word in vectors: count += 1 embedding[vocab[word]]= vectors[word] else: embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist() print( 'word in embedding',count) return embedding def encode_to_split(sentence,alphabet): indices = [] tokens = cut(sentence) seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens] return seq @log_time_delta def load_text_vec(alphabet,filename="",embedding_size = 100): vectors = {} with open(filename,encoding='utf-8') as f: i = 0 for line in f: i += 1 if i % 100000 == 0: print( 'epch %d' % i) items = line.strip().split(' ') if len(items) == 2: vocab_size, embedding_size= items[0],items[1] print( ( vocab_size, embedding_size)) else: word = items[0] if word in alphabet: vectors[word] = items[1:] print( 'embedding_size',embedding_size) print( 'done') print( 'words found in wor2vec embedding ',len(vectors.keys())) return vectors @log_time_delta def get_embedding(alphabet,dim = 300,language 
="en",dataset=""): pkl_name="temp/"+dataset+".subembedding.pkl" if os.path.exists(pkl_name): return pickle.load(open(pkl_name,"rb")) if language=="en": fname = 'embedding/glove.6B/glove.6B.300d.txt' else: fname= "embedding/embedding.200.header_txt" embeddings = load_text_vec(alphabet,fname,embedding_size = dim) sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim) pickle.dump( sub_embeddings,open(pkl_name,"wb")) return sub_embeddings @log_time_delta def get_mini_batch_test(df,alphabet,batch_size): q = [] a = [] pos_overlap = [] for index,row in df.iterrows(): question = encode_to_split(row["question"],alphabet) answer = encode_to_split(row["answer"],alphabet) overlap_pos = overlap_index(row['question'],row['answer']) q.append(question) a.append(answer) pos_overlap.append(overlap_pos) m = 0 n = len(q) idx_list = np.arange(m,n,batch_size) mini_batches = [] for idx in idx_list: mini_batches.append(np.arange(idx,min(idx + batch_size,n))) for mini_batch in mini_batches: mb_q = [ q[t] for t in mini_batch] mb_a = [ a[t] for t in mini_batch] mb_pos_overlap = [pos_overlap[t] for t in mini_batch] mb_q,mb_q_mask = prepare_data(mb_q) mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap) yield(mb_q,mb_a) # calculate the overlap_index def overlap_index(question,answer,stopwords = []): ans_token = cut(answer) qset = set(cut(question)) aset = set(ans_token) a_len = len(ans_token) # q_index = np.arange(1,q_len) a_index = np.arange(1,a_len + 1) overlap = qset.intersection(aset) # for i,q in enumerate(cut(question)[:q_len]): # value = 1 # if q in overlap: # value = 2 # q_index[i] = value for i,a in enumerate(ans_token): if a in overlap: a_index[i] = OVERLAP return a_index def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False): q,a,neg_a=[],[],[] answers=df["answer"][:250] ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict() for question in tqdm(df['question'].unique()): index= ground_truth[question] 
canindates = [i for i in range(250)] canindates.remove(index) a_neg_index = random.choice(canindates) seq_q = encode_to_split(question,alphabet) seq_a = encode_to_split(answers[index],alphabet) seq_neg_a = encode_to_split(answers[a_neg_index],alphabet) q.append(seq_q) a.append( seq_a) neg_a.append(seq_neg_a ) return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False): if sort_by_len: sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True) q = [ q[i] for i in sorted_index] a = [a[i] for i in sorted_index] neg_a = [ neg_a[i] for i in sorted_index] pos_overlap = [pos_overlap[i] for i in sorted_index] neg_overlap = [neg_overlap[i] for i in sorted_index] #get batch m = 0 n = len(q) idx_list = np.arange(m,n,batch_size) if shuffle: np.random.shuffle(idx_list) mini_batches = [] for idx in idx_list: mini_batches.append(np.arange(idx,min(idx + batch_size,n))) for mini_batch in tqdm(mini_batches): mb_q = [ q[t] for t in mini_batch] mb_a = [ a[t] for t in mini_batch] mb_neg_a = [ neg_a[t] for t in mini_batch] mb_pos_overlap = [pos_overlap[t] for t in mini_batch] mb_neg_overlap = [neg_overlap[t] for t in mini_batch] mb_q,mb_q_mask = prepare_data(mb_q) mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap) mb_neg_a,mb_neg_overlaps = prepare_data(mb_neg_a,mb_neg_overlap) # mb_a,mb_a_mask = prepare_data(mb_a,mb_pos_overlap) # mb_neg_a , mb_a_neg_mask = prepare_data(mb_neg_a) yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask) def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None): q = [] a = [] neg_a = [] for question in df['question'].unique(): # group = df[df["question"]==question] # pos_answers = group[df["flag"] == 1]["answer"] # neg_answers = group[df["flag"] == 0]["answer"].reset_index() group = df[df["question"]==question] pos_answers = group[group["flag"] == 1]["answer"] neg_answers = group[group["flag"] == 
0]["answer"]#.reset_index() for pos in pos_answers: if model is not None and sess is not None: pos_sent= encode_to_split(pos,alphabet) q_sent,q_mask= prepare_data([pos_sent]) neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers] a_sent,a_mask= prepare_data(neg_sents) scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask)) neg_index = scores.argmax() else: if len(neg_answers.index) > 0: neg_index = np.random.choice(neg_answers.index) neg = neg_answers.reset_index().loc[neg_index,]["answer"] seq_q = encode_to_split(question,alphabet) seq_a = encode_to_split(pos,alphabet) seq_neg_a = encode_to_split(neg,alphabet) q.append(seq_q) a.append(seq_a) neg_a.append(seq_neg_a) return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) def prepare_data(seqs,overlap = None): lengths = [len(seq) for seq in seqs] n_samples = len(seqs) max_len = np.max(lengths) x = np.zeros((n_samples,max_len)).astype('int32') if overlap is not None: overlap_position = np.zeros((n_samples,max_len)).astype('float') for idx ,seq in enumerate(seqs): x[idx,:lengths[idx]] = seq overlap_position[idx,:lengths[idx]] = overlap[idx] return x,overlap_position else: x_mask = np.zeros((n_samples, max_len)).astype('float') for idx, seq in enumerate(seqs): x[idx, :lengths[idx]] = seq x_mask[idx, :lengths[idx]] = 1.0 # print( x, x_mask) return x, x_mask # def prepare_data(seqs): # lengths = [len(seq) for seq in seqs] # n_samples = len(seqs) # max_len = np.max(lengths) # x = np.zeros((n_samples, max_len)).astype('int32') # x_mask = np.zeros((n_samples, max_len)).astype('float') # for idx, seq in enumerate(seqs): # x[idx, :lengths[idx]] = seq # x_mask[idx, :lengths[idx]] = 1.0 # # print( x, x_mask) # return x, x_mask def getLogger(): import sys import logging import os import time now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) log_filename = "log/" +time.strftime("%Y%m%d", 
timeArray) program = os.path.basename(sys.argv[0]) logger = logging.getLogger(program) if not os.path.exists(log_filename): os.mkdir(log_filename) logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w') logging.root.setLevel(level=logging.INFO) logger.info("running %s" % ' '.join(sys.argv)) return logger
/main.py
import data_helper import time import datetime import os import tensorflow as tf import numpy as np import evaluation now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) timeDay = time.strftime("%Y%m%d", timeArray) print (timeStamp) def main(args): args._parse_flags() print("\nParameters:") for attr, value in sorted(args.__flags.items()): print(("{}={}".format(attr.upper(), value))) log_dir = 'log/'+ timeDay if not os.path.exists(log_dir): os.makedirs(log_dir) data_file = log_dir + '/test_' + args.data + timeStamp precision = data_file + 'precise' print('load data ...........') train,test,dev = data_helper.load(args.data,filter = args.clean) q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split())) a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split())) alphabet = data_helper.get_alphabet([train,test,dev]) print('the number of words',len(alphabet)) print('get embedding') if args.data=="quora": embedding = data_helper.get_embedding(alphabet,language="cn") else: embedding = data_helper.get_embedding(alphabet) with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)): # with tf.device("/cpu:0"): session_conf = tf.ConfigProto() session_conf.allow_soft_placement = args.allow_soft_placement session_conf.log_device_placement = args.log_device_placement session_conf.gpu_options.allow_growth = True sess = tf.Session(config=session_conf) model = QA_CNN_extend(max_input_left = q_max_sent_length, max_input_right = a_max_sent_length, batch_size = args.batch_size, vocab_size = len(alphabet), embedding_size = args.embedding_dim, filter_sizes = list(map(int, args.filter_sizes.split(","))), num_filters = args.num_filters, hidden_size = args.hidden_size, dropout_keep_prob = args.dropout_keep_prob, embeddings = embedding, l2_reg_lambda = args.l2_reg_lambda, trainable = args.trainable, pooling = args.pooling, conv = args.conv) model.build_graph() 
sess.run(tf.global_variables_initializer()) def train_step(model,sess,batch): for data in batch: feed_dict = { model.question:data[0], model.answer:data[1], model.answer_negative:data[2], model.q_mask:data[3], model.a_mask:data[4], model.a_neg_mask:data[5] } _, summary, step, loss, accuracy,score12, score13, see = sess.run( [model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see], feed_dict) time_str = datetime.datetime.now().isoformat() print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13))) def predict(model,sess,batch,test): scores = [] for data in batch: feed_dict = { model.question:data[0], model.answer:data[1], model.q_mask:data[2], model.a_mask:data[3] } score = sess.run( model.score12, feed_dict) scores.extend(score) return np.array(scores[:len(test)]) for i in range(args.num_epoches): datas = data_helper.get_mini_batch(train,alphabet,args.batch_size) train_step(model,sess,datas) test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size) predicted_test = predict(model,sess,test_datas,test) print(len(predicted_test)) print(len(test)) map_mrr_test = evaluation.evaluationBypandas(test,predicted_test) print('map_mrr test',map_mrr_test)
/models/QA_CNN_pairwise.py
#coding:utf-8 import tensorflow as tf import numpy as np from tensorflow.contrib import rnn import models.blocks as blocks # model_type :apn or qacnn class QA_CNN_extend(object): # def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size, # dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'): # # """ # QA_RNN model for question answering # # Args: # self.dropout_keep_prob: dropout rate # self.num_filters : number of filters # self.para : parameter list # self.extend_feature_dim : my extend feature dimension # self.max_input_left : the length of question # self.max_input_right : the length of answer # self.pooling : pooling strategy :max pooling or attentive pooling # # """ # self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob') # self.num_filters = num_filters # self.embeddings = embeddings # self.embedding_size = embedding_size # self.batch_size = batch_size # self.filter_sizes = filter_sizes # self.l2_reg_lambda = l2_reg_lambda # self.para = [] # # self.max_input_left = max_input_left # self.max_input_right = max_input_right # self.trainable = trainable # self.vocab_size = vocab_size # self.pooling = pooling # self.total_num_filter = len(self.filter_sizes) * self.num_filters # # self.conv = conv # self.pooling = 'traditional' # self.learning_rate = learning_rate # # self.hidden_size = hidden_size # # self.attention_size = 100 def __init__(self,opt): for key,value in opt.items(): self.__setattr__(key,value) self.attention_size = 100 self.pooling = 'mean' self.total_num_filter = len(self.filter_sizes) * self.num_filters self.para = [] self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob') def create_placeholder(self): print(('Create placeholders')) # he length of the sentence is varied according to the batch,so the None,None self.question = 
tf.placeholder(tf.int32,[None,None],name = 'input_question') self.max_input_left = tf.shape(self.question)[1] self.batch_size = tf.shape(self.question)[0] self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer') self.max_input_right = tf.shape(self.answer)[1] self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right') # self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask') # self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask') # self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask') def add_embeddings(self): print( 'add embeddings') if self.embeddings is not None: print( "load embedding") W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable) else: print( "random embedding") W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable) self.embedding_W = W # self.overlap_W = tf.Variable(a,name="W",trainable = True) self.para.append(self.embedding_W) self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question) self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer) self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative) #real length self.q_len,self.q_mask = blocks.length(self.question) self.a_len,self.a_mask = blocks.length(self.answer) self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative) def convolution(self): print( 'convolution:wide_convolution') self.kernels = [] for i,filter_size in enumerate(self.filter_sizes): with tf.name_scope('conv-max-pool-%s' % filter_size): filter_shape = [filter_size,self.embedding_size,1,self.num_filters] W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W") b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b") self.kernels.append((W,b)) self.para.append(W) self.para.append(b) embeddings = 
[self.q_embedding,self.a_embedding,self.a_neg_embedding] self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings] #convolution def pooling_graph(self): if self.pooling == 'mean': self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask) self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask) self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask) self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask) elif self.pooling == 'attentive': self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) elif self.pooling == 'position': self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) elif self.pooling == 'traditional': print( self.pooling) print(self.q_cnn) self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) def para_initial(self): # print(("---------")) # self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp')) self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U')) self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm')) self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm')) self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms')) self.M_qi = tf.Variable(tf.truncated_normal(shape = 
[self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi')) def mean_pooling(self,conv,mask): conv = tf.squeeze(conv,2) print( tf.expand_dims(tf.cast(mask,tf.float32),-1)) # conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1)) # self.see = conv_mask # print( conv_mask) return tf.reduce_mean(conv,axis = 1); def attentive_pooling(self,input_left,input_right,q_mask,a_mask): Q = tf.squeeze(input_left,axis = 2) A = tf.squeeze(input_right,axis = 2) print( Q) print( A) # Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q') # A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A') # G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\ # A,transpose_b = True),name = 'G') first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U) second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters]) result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1])) print( second_step) print( tf.transpose(A,perm = [0,2,1])) # print( 'result',result) G = tf.tanh(result) # G = result # column-wise pooling ,row-wise pooling row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling') col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling') self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q') self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1)) self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a') self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1)) self.see = G R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q') R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a') return R_q,R_a def traditional_attention(self,input_left,input_right,q_mask,a_mask): 
input_left = tf.squeeze(input_left,axis = 2) input_right = tf.squeeze(input_right,axis = 2) input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2)) Q = tf.reduce_mean(input_left_mask,1) a_shape = tf.shape(input_right) A = tf.reshape(input_right,[-1,self.total_num_filter]) m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1)) f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1])) self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2)) self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True)) self.see = self.f_attention_norm a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1) return Q,a_attention def position_attention(self,input_left,input_right,q_mask,a_mask): input_left = tf.squeeze(input_left,axis = 2) input_right = tf.squeeze(input_right,axis = 2) # Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q') # A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A') Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1) QU = tf.matmul(Q,self.U) QUA = tf.multiply(tf.expand_dims(QU,1),input_right) self.attention_a = tf.cast(tf.argmax(QUA,2) ,tf.float32) # q_shape = tf.shape(input_left) # Q_1 = tf.reshape(input_left,[-1,self.total_num_filter]) # QU = tf.matmul(Q_1,self.U) # QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter]) # A_1 = tf.transpose(input_right,[0,2,1]) # QUA = tf.matmul(QU_1,A_1) # QUA = tf.nn.l2_normalize(QUA,1) # G = tf.tanh(QUA) # Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1) # # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)) # row_pooling = 
tf.reduce_max(G,1,name="row_pooling") # col_pooling = tf.reduce_max(G,2,name="col_pooling") # self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a") self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32)) self.see = self.attention_a self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True)) self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter]) return Q ,self.r_a def create_loss(self): with tf.name_scope('score'): self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn) self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn) l2_loss = tf.constant(0.0) for p in self.para: l2_loss += tf.nn.l2_loss(p) with tf.name_scope("loss"): self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13))) self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss tf.summary.scalar('loss', self.loss) # Accuracy with tf.name_scope("accuracy"): self.correct = tf.equal(0.0, self.losses) self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy") tf.summary.scalar('accuracy', self.accuracy) def create_op(self): self.global_step = tf.Variable(0, name = "global_step", trainable = False) self.optimizer = tf.train.AdamOptimizer(self.learning_rate) self.grads_and_vars = self.optimizer.compute_gradients(self.loss) self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step) def max_pooling(self,conv,input_length): pooled = tf.nn.max_pool( conv, ksize = [1, input_length, 1, 1], strides = [1, 1, 1, 1], padding = 'VALID', name="pool") return pooled def getCosine(self,q,a): pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder) pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder) pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1)) pooled_len_2 = 
tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1)) pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1) score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores") return score def wide_convolution(self,embedding): cnn_outputs = [] for i,filter_size in enumerate(self.filter_sizes): conv = tf.nn.conv2d( embedding, self.kernels[i][0], strides=[1, 1, self.embedding_size, 1], padding='SAME', name="conv-1" ) h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1") cnn_outputs.append(h) cnn_reshaped = tf.concat(cnn_outputs,3) return cnn_reshaped def variable_summaries(self,var): with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def build_graph(self): self.create_placeholder() self.add_embeddings() self.para_initial() self.convolution() self.pooling_graph() self.create_loss() self.create_op() self.merged = tf.summary.merge_all() def train(self,sess,data): feed_dict = { self.question:data[0], self.answer:data[1], self.answer_negative:data[2], # self.q_mask:data[3], # self.a_mask:data[4], # self.a_neg_mask:data[5], self.dropout_keep_prob_holder:self.dropout_keep_prob } _, summary, step, loss, accuracy,score12, score13, see = sess.run( [self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see], feed_dict) return _, summary, step, loss, accuracy,score12, score13, see def predict(self,sess,data): feed_dict = { self.question:data[0], self.answer:data[1], # self.q_mask:data[2], # self.a_mask:data[3], self.dropout_keep_prob_holder:1.0 } score = sess.run( self.score12, feed_dict) return score if __name__ == '__main__': cnn = QA_CNN_extend( max_input_left = 33, 
max_input_right = 40, batch_size = 3, vocab_size = 5000, embedding_size = 100, filter_sizes = [3,4,5], num_filters = 64, hidden_size = 100, dropout_keep_prob = 1.0, embeddings = None, l2_reg_lambda = 0.0, trainable = True, pooling = 'max', conv = 'wide') cnn.build_graph() input_x_1 = np.reshape(np.arange(3 * 33),[3,33]) input_x_2 = np.reshape(np.arange(3 * 40),[3,40]) input_x_3 = np.reshape(np.arange(3 * 40),[3,40]) q_mask = np.ones((3,33)) a_mask = np.ones((3,40)) a_neg_mask = np.ones((3,40)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) feed_dict = { cnn.question:input_x_1, cnn.answer:input_x_2, # cnn.answer_negative:input_x_3, cnn.q_mask:q_mask, cnn.a_mask:a_mask, cnn.dropout_keep_prob_holder:cnn.dropout_keep # cnn.a_neg_mask:a_neg_mask # cnn.q_pos_overlap:q_pos_embedding, # cnn.q_neg_overlap:q_neg_embedding, # cnn.a_pos_overlap:a_pos_embedding, # cnn.a_neg_overlap:a_neg_embedding, # cnn.q_position:q_position, # cnn.a_pos_position:a_pos_position, # cnn.a_neg_position:a_neg_position } question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict) print( question.shape,answer.shape) print( score)
/models/__init__.py
from .QA_CNN_pairwise import QA_CNN_extend as CNN from .QA_RNN_pairwise import QA_RNN_extend as RNN from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN def setup(opt): if opt["model_name"]=="cnn": model=CNN(opt) elif opt["model_name"]=="rnn": model=RNN(opt) elif opt['model_name']=='qcnn': model=QCNN(opt) else: print("no model") exit(0) return model
/models/my/nn.py
from my.general import flatten, reconstruct, add_wd, exp_mask import numpy as np import tensorflow as tf _BIAS_VARIABLE_NAME = "bias" _WEIGHTS_VARIABLE_NAME = "kernel" def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0, is_train=None):#, name_w='', name_b='' # if args is None or (nest.is_sequence(args) and not args): # raise ValueError("`args` must be specified") # if not nest.is_sequence(args): # args = [args] flat_args = [flatten(arg, 1) for arg in args]#[210,20] # if input_keep_prob < 1.0: # assert is_train is not None flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args] total_arg_size = 0#[60] shapes = [a.get_shape() for a in flat_args] for shape in shapes: if shape.ndims != 2: raise ValueError("linear is expecting 2D arguments: %s" % shapes) if shape[1].value is None: raise ValueError("linear expects shape[1] to be provided for shape %s, " "but saw %s" % (shape, shape[1])) else: total_arg_size += shape[1].value # print(total_arg_size) # exit() dtype = [a.dtype for a in flat_args][0] # scope = tf.get_variable_scope() with tf.variable_scope(scope) as outer_scope: weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype) if len(flat_args) == 1: res = tf.matmul(flat_args[0], weights) else: res = tf.matmul(tf.concat(flat_args, 1), weights) if not bias: flat_out = res else: with tf.variable_scope(outer_scope) as inner_scope: inner_scope.set_partitioner(None) biases = tf.get_variable( _BIAS_VARIABLE_NAME, [output_size], dtype=dtype, initializer=tf.constant_initializer(bias_start, dtype=dtype)) flat_out = tf.nn.bias_add(res, biases) out = reconstruct(flat_out, args[0], 1) if squeeze: out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1]) if wd: add_wd(wd) return out def softmax(logits, mask=None, scope=None): with tf.name_scope(scope or "Softmax"): if mask is not None: logits = exp_mask(logits, mask) flat_logits = flatten(logits, 1) flat_out = 
tf.nn.softmax(flat_logits) out = reconstruct(flat_out, logits, 1) return out def softsel(target, logits, mask=None, scope=None): """ :param target: [ ..., J, d] dtype=float :param logits: [ ..., J], dtype=float :param mask: [ ..., J], dtype=bool :param scope: :return: [..., d], dtype=float """ with tf.name_scope(scope or "Softsel"): a = softmax(logits, mask = mask) target_rank = len(target.get_shape().as_list()) out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2) return out def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0): with tf.variable_scope(scope or "highway_layer"): d = arg.get_shape()[-1] trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob) trans = tf.nn.relu(trans) gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob) gate = tf.nn.sigmoid(gate) out = gate * trans + (1 - gate) * arg return out def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0): with tf.variable_scope(scope or "highway_network"): prev = arg cur = None for layer_idx in range(num_layers): cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd, input_keep_prob=input_keep_prob) prev = cur return cur def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None): with tf.variable_scope(scope or "conv1d"): num_channels = in_.get_shape()[-1] filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float') bias = tf.get_variable("bias", shape=[filter_size], dtype='float') strides = [1, 1, 1, 1] in_ = tf.nn.dropout(in_, keep_prob) xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d] out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d] return out def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None): with tf.variable_scope(scope or "multi_conv1d"): 
assert len(filter_sizes) == len(heights) outs = [] for filter_size, height in zip(filter_sizes, heights): if filter_size == 0: continue out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height)) outs.append(out) concat_out = tf.concat(outs, axis=2) return concat_out if __name__ == '__main__': a = tf.Variable(np.random.random(size=(2,2,4))) b = tf.Variable(np.random.random(size=(2,3,4))) c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1]) test = flatten(c,1) out = reconstruct(test, c, 1) d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1]) e = linear([c,d,c*d],1,bias = False,scope = "test",) # f = softsel(d, e) with tf.Session() as sess: tf.global_variables_initializer().run() print(sess.run(test)) print(sess.run(tf.shape(out))) exit() print(sess.run(tf.shape(a))) print(sess.run(a)) print(sess.run(tf.shape(b))) print(sess.run(b)) print(sess.run(tf.shape(c))) print(sess.run(c)) print(sess.run(tf.shape(d))) print(sess.run(d)) print(sess.run(tf.shape(e))) print(sess.run(e))
/run.py
from tensorflow import flags import tensorflow as tf from config import Singleton import data_helper import datetime,os import models import numpy as np import evaluation import sys import logging import time now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) log_filename = "log/" +time.strftime("%Y%m%d", timeArray) program = os.path.basename('program') logger = logging.getLogger(program) if not os.path.exists(log_filename): os.makedirs(log_filename) logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w') logging.root.setLevel(level=logging.INFO) logger.info("running %s" % ' '.join(sys.argv)) from data_helper import log_time_delta,getLogger logger=getLogger() args = Singleton().get_qcnn_flag() args._parse_flags() opts=dict() logger.info("\nParameters:") for attr, value in sorted(args.__flags.items()): logger.info(("{}={}".format(attr.upper(), value))) opts[attr]=value train,test,dev = data_helper.load(args.data,filter = args.clean) q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split())) a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split())) alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data ) logger.info('the number of words :%d '%len(alphabet)) if args.data=="quora" or args.data=="8008" : print("cn embedding") embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data ) train_data_loader = data_helper.getBatch48008 else: embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data ) train_data_loader = data_helper.get_mini_batch opts["embeddings"] =embedding opts["vocab_size"]=len(alphabet) opts["max_input_right"]=a_max_sent_length opts["max_input_left"]=q_max_sent_length opts["filter_sizes"]=list(map(int, args.filter_sizes.split(","))) print("innitilize over") #with tf.Graph().as_default(), tf.device("/gpu:" + 
str(args.gpu)): with tf.Graph().as_default(): # with tf.device("/cpu:0"): session_conf = tf.ConfigProto() session_conf.allow_soft_placement = args.allow_soft_placement session_conf.log_device_placement = args.log_device_placement session_conf.gpu_options.allow_growth = True sess = tf.Session(config=session_conf) model=models.setup(opts) model.build_graph() saver = tf.train.Saver() # ckpt = tf.train.get_checkpoint_state("checkpoint") # if ckpt and ckpt.model_checkpoint_path: # # Restores from checkpoint # saver.restore(sess, ckpt.model_checkpoint_path) # if os.path.exists("model") : # import shutil # shutil.rmtree("model") # builder = tf.saved_model.builder.SavedModelBuilder("./model") # builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING]) # builder.save(True) # variable_averages = tf.train.ExponentialMovingAverage( model) # variables_to_restore = variable_averages.variables_to_restore() # saver = tf.train.Saver(variables_to_restore) # for name in variables_to_restore: # print(name) sess.run(tf.global_variables_initializer()) @log_time_delta def predict(model,sess,batch,test): scores = [] for data in batch: score = model.predict(sess,data) scores.extend(score) return np.array(scores[:len(test)]) best_p1=0 for i in range(args.num_epoches): for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess): # for data in data_helper.getBatch48008(train,alphabet,args.batch_size): _, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data) time_str = datetime.datetime.now().isoformat() print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13))) logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13))) #<<<<<<< HEAD # # # if i>0 and i % 5 ==0: # test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size) # # 
predicted_test = predict(model,sess,test_datas,test) # map_mrr_test = evaluation.evaluationBypandas(test,predicted_test) # # logger.info('map_mrr test' +str(map_mrr_test)) # print('map_mrr test' +str(map_mrr_test)) # # test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size) # predicted_test = predict(model,sess,test_datas,dev) # map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test) # # logger.info('map_mrr dev' +str(map_mrr_test)) # print('map_mrr dev' +str(map_mrr_test)) # map,mrr,p1 = map_mrr_test # if p1>best_p1: # best_p1=p1 # filename= "checkpoint/"+args.data+"_"+str(p1)+".model" # save_path = saver.save(sess, filename) # # load_path = saver.restore(sess, model_path) # # import shutil # shutil.rmtree("model") # builder = tf.saved_model.builder.SavedModelBuilder("./model") # builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING]) # builder.save(True) # # #======= test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size) predicted_test = predict(model,sess,test_datas,test) map_mrr_test = evaluation.evaluationBypandas(test,predicted_test) logger.info('map_mrr test' +str(map_mrr_test)) print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
/test.py
# -*- coding: utf-8 -*- from tensorflow import flags import tensorflow as tf from config import Singleton import data_helper import datetime import os import models import numpy as np import evaluation from data_helper import log_time_delta,getLogger logger=getLogger() args = Singleton().get_rnn_flag() #args = Singleton().get_8008_flag() args._parse_flags() opts=dict() logger.info("\nParameters:") for attr, value in sorted(args.__flags.items()): logger.info(("{}={}".format(attr.upper(), value))) opts[attr]=value train,test,dev = data_helper.load(args.data,filter = args.clean) q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split())) a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split())) alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data ) logger.info('the number of words :%d '%len(alphabet)) if args.data=="quora" or args.data=="8008" : print("cn embedding") embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data ) train_data_loader = data_helper.getBatch48008 else: embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data ) train_data_loader = data_helper.get_mini_batch opts["embeddings"] =embedding opts["vocab_size"]=len(alphabet) opts["max_input_right"]=a_max_sent_length opts["max_input_left"]=q_max_sent_length opts["filter_sizes"]=list(map(int, args.filter_sizes.split(","))) print("innitilize over") #with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)): with tf.Graph().as_default(): # with tf.device("/cpu:0"): session_conf = tf.ConfigProto() session_conf.allow_soft_placement = args.allow_soft_placement session_conf.log_device_placement = args.log_device_placement session_conf.gpu_options.allow_growth = True sess = tf.Session(config=session_conf) model=models.setup(opts) model.build_graph() saver = tf.train.Saver() sess.run(tf.global_variables_initializer()) # fun first than print or save ckpt = tf.train.get_checkpoint_state("checkpoint") if ckpt and 
ckpt.model_checkpoint_path: # Restores from checkpoint saver.restore(sess, ckpt.model_checkpoint_path) print(sess.run(model.position_embedding)[0]) if os.path.exists("model") : import shutil shutil.rmtree("model") builder = tf.saved_model.builder.SavedModelBuilder("./model") builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING]) builder.save(True) variable_averages = tf.train.ExponentialMovingAverage( model) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) for name in variables_to_restore: print(name) @log_time_delta def predict(model,sess,batch,test): scores = [] for data in batch: score = model.predict(sess,data) scores.extend(score) return np.array(scores[:len(test)]) text = "怎么 提取 公积金 ?" splited_text=data_helper.encode_to_split(text,alphabet) mb_q,mb_q_mask = data_helper.prepare_data([splited_text]) mb_a,mb_a_mask = data_helper.prepare_data([splited_text]) data = (mb_q,mb_a,mb_q_mask,mb_a_mask) score = model.predict(sess,data) print(score) feed_dict = { model.question:data[0], model.answer:data[1], model.q_mask:data[2], model.a_mask:data[3], model.dropout_keep_prob_holder:1.0 } sess.run(model.position_embedding,feed_dict=feed_dict)[0]
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Sssssbo/SDCNet
refs/heads/master
{"/SDCNet.py": ["/model.py", "/datasets.py", "/misc.py"], "/infer_SDCNet.py": ["/model.py", "/datasets.py", "/misc.py"], "/create_free.py": ["/datasets.py", "/misc.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/model/make_model.py": ["/model/backbones/resnet.py"]}
└── ├── SDCNet.py ├── count_dataset.py ├── create_free.py ├── datasets.py ├── infer_SDCNet.py ├── misc.py ├── model │ ├── backbones │ │ └── resnet.py │ └── make_model.py ├── model.py ├── resnet │ ├── __init__.py │ ├── config.py │ └── make_model.py └── resnext └── __init__.py
/SDCNet.py
# Training script for SDCNet salient-object detection on DUTS-TR.
# Reads (image, mask, size-class label) triples listed in ./label_DUTS-TR.csv,
# trains with SGD + polynomial LR decay, and checkpoints at iteration 30000.
import datetime
import os
import time

import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import pandas as pd
import numpy as np

import joint_transforms
from config import msra10k_path, MTDD_train_path
from datasets import ImageFolder_joint
from misc import AvgMeter, check_mkdir, cal_sc
from model import R3Net, SDCNet
from torch.backends import cudnn

cudnn.benchmark = True

torch.manual_seed(2021)
torch.cuda.set_device(6)  # hard-coded GPU id; adjust for the local machine

csv_path = './label_DUTS-TR.csv'
ckpt_path = './ckpt'
exp_name = 'SDCNet'

# Training hyper-parameters. 'snapshot' names a checkpoint (without extension)
# to resume from; the empty string means train from scratch.
args = {
    'iter_num': 30000,
    'train_batch_size': 16,
    'last_iter': 0,
    'lr': 1e-3,
    'lr_decay': 0.9,
    'weight_decay': 5e-4,
    'momentum': 0.9,
    'snapshot': ''
}

# Joint transforms are applied to image and mask together (crop/flip/rotate);
# normalisation only touches the image; the mask is just converted to a tensor.
joint_transform = joint_transforms.Compose([
    joint_transforms.RandomCrop(300),
    joint_transforms.RandomHorizontallyFlip(),
    joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()

all_data = pd.read_csv(csv_path)
train_set = ImageFolder_joint(all_data, joint_transform, img_transform, target_transform)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True, drop_last=True)
# One log file per run, named after the start timestamp.
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')


def main():
    """Build the network and optimizer, optionally resume a snapshot, then train."""
    net = SDCNet(num_classes = 5).cuda().train()
    # print('training in ' + exp_name)

    # Two parameter groups: biases get twice the base LR and no weight decay.
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': args['lr'], 'weight_decay': args['weight_decay']}
    ], momentum=args['momentum'])

    if len(args['snapshot']) > 0:
        # Resume both model weights and optimizer state, then reset the LRs.
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
        optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth')))
        optimizer.param_groups[0]['lr'] = 2 * args['lr']
        optimizer.param_groups[1]['lr'] = args['lr']

    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    open(log_path, 'w').write(str(args) + '\n\n')
    train(net, optimizer)


def train(net, optimizer):
    """Run the training loop until args['iter_num'] iterations, then save and exit.

    Loss is the sum of five CrossEntropy terms (p5..p1, against the long mask)
    and eleven BCEWithLogits terms (predict1..predict11, against the float mask).
    """
    start_time = time.time()
    curr_iter = args['last_iter']
    num_class = [0, 0, 0, 0, 0]  # NOTE(review): printed below but never updated
    while True:
        total_loss_record, loss0_record, loss1_record, loss2_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
        batch_time = AvgMeter()
        end = time.time()
        print('-----begining the first stage, train_mode==0-----')
        for i, data in enumerate(train_loader):
            # Polynomial LR decay; group 0 (biases) stays at twice group 1's LR.
            optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num']
                                                                ) ** args['lr_decay']
            optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num']
                                                            ) ** args['lr_decay']
            inputs, gt, labels = data
            print(labels)
            # depends on the num of classes
            cweight = torch.tensor([0.5, 0.75, 1, 1.25, 1.5])
            #weight = torch.ones(size=gt.shape)
            weight = gt.clone().detach()
            # size-class labels as a numpy array; presumably selects a branch
            # inside SDCNet.forward — confirm against model.py
            sizec = labels.numpy()
            #ta = np.zeros(shape=gt.shape)
            '''
            np.zeros(shape=labels.shape)
            sc = gt.clone().detach()
            for i in range(len(sizec)):
                gta = np.array(to_pil(sc[i,:].data.squeeze(0).cpu()))#
                #print(gta.shape)
                labels[i] = cal_sc(gta)
                sizec[i] = labels[i]
            print(labels)
            '''
            batch_size = inputs.size(0)
            inputs = Variable(inputs).cuda()
            gt = Variable(gt).cuda()
            labels = Variable(labels).cuda()
            #print(sizec.shape)
            optimizer.zero_grad()
            # Sixteen outputs: five softmax side maps (p5..p1) and eleven
            # sigmoid saliency maps (predict1..predict11).
            p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 = net(inputs, sizec)  # mode=1
            criterion = nn.BCEWithLogitsLoss().cuda()
            criterion2 = nn.CrossEntropyLoss().cuda()
            # CrossEntropy needs integer class indices with the channel dim removed.
            gt2 = gt.long()
            gt2 = gt2.squeeze(1)
            l5 = criterion2(p5, gt2)
            l4 = criterion2(p4, gt2)
            l3 = criterion2(p3, gt2)
            l2 = criterion2(p2, gt2)
            l1 = criterion2(p1, gt2)
            loss0 = criterion(predict11, gt)
            loss10 = criterion(predict10, gt)
            loss9 = criterion(predict9, gt)
            loss8 = criterion(predict8, gt)
            loss7 = criterion(predict7, gt)
            loss6 = criterion(predict6, gt)
            loss5 = criterion(predict5, gt)
            loss4 = criterion(predict4, gt)
            loss3 = criterion(predict3, gt)
            loss2 = criterion(predict2, gt)
            loss1 = criterion(predict1, gt)
            # Unweighted sum of all sixteen loss terms.
            total_loss = l1 + l2 + l3 + l4 + l5 + loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9 + loss10
            total_loss.backward()
            optimizer.step()
            total_loss_record.update(total_loss.item(), batch_size)
            loss1_record.update(l5.item(), batch_size)
            loss0_record.update(loss0.item(), batch_size)
            # NOTE(review): += 1.0 silently turns curr_iter into a float; the
            # '== args['iter_num']' check below still matches (30000.0 == 30000).
            curr_iter += 1.0
            batch_time.update(time.time() - end)
            end = time.time()
            log = '[iter %d], [R1/Mode0], [total loss %.5f]\n' \
                  '[l5 %.5f], [loss0 %.5f]\n' \
                  '[lr %.13f], [time %.4f]' % \
                  (curr_iter, total_loss_record.avg, loss1_record.avg, loss0_record.avg,
                   optimizer.param_groups[1]['lr'], batch_time.avg)
            print(log)
            print('Num of class:', num_class)
            open(log_path, 'a').write(log + '\n')
            if curr_iter == args['iter_num']:
                # Final checkpoint: weights and optimizer state, then stop.
                torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter))
                torch.save(optimizer.state_dict(), os.path.join(ckpt_path, exp_name, '%d_optim.pth' % curr_iter))
                total_time = time.time() - start_time
                print(total_time)
                return


if __name__ == '__main__':
    main()
/count_dataset.py
# Collect per-dataset label statistics (classes 0..10) from the SOD label CSVs
# and write them to ./Dataset_statistics.csv.
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm

# Datasets whose label CSVs (./SOD_label/label_<name>.csv) are summarised.
path_list = ['msra10k', 'ECSSD', 'DUT-OMROM', 'DUTS-TR', 'DUTS-TE', 'HKU-IS',
             'PASCAL-S', 'SED2', 'SOC', 'SOD', 'THUR-15K']

_NUM_CLASSES = 11  # labels 0..10


def _count_labels(labels):
    """Count labels 0..10 in an iterable.

    Returns (counts, total) where counts is a list of 11 per-class tallies and
    total is the number of rows seen (labels outside 0..10 count toward total
    only, matching the original if/elif chain's behaviour).
    """
    counts = [0] * _NUM_CLASSES
    total = 0
    for label in labels:
        if 0 <= label < _NUM_CLASSES:
            counts[int(label)] += 1
        total += 1
    return counts, total


def main():
    """Print per-class counts for each dataset and save the summary CSV."""
    rows = []
    for data_path in path_list:
        test_path = './SOD_label/label_' + data_path + '.csv'
        print('Evalute for ' + test_path)
        test_data = pd.read_csv(test_path)
        counts, num = _count_labels(test_data['label'])
        c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 = counts
        print('[Class0 %.f], [Class1 %.f], [Class2 %.f], [Class3 %.f]\n'\
              '[Class4 %.f], [Class5 %.f], [Class6 %.f], [Class7 %.f]\n'\
              '[Class8 %.f], [Class9 %.f], [Class10 %.f], [Total %.f]\n'%\
              (c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, num)
              )
        rows.append([data_path] + counts + [num])

    # Same column names and order as the original reindexed DataFrame.
    columns = (['Datasets']
               + ['Class %d' % k for k in range(_NUM_CLASSES)]
               + ['Num of Pic'])
    label_file = pd.DataFrame(rows, columns=columns)
    label_file.to_csv('./Dataset_statistics.csv', index=False)


if __name__ == '__main__':
    main()
/create_free.py
import numpy as np import os import torch from PIL import Image from torch.autograd import Variable from torchvision import transforms from torch.utils.data import DataLoader import matplotlib.pyplot as plt import pandas as pd from tqdm import tqdm import cv2 import numpy as np from config import ecssd_path, hkuis_path, pascals_path, sod_path, dutomron_path, MTDD_test_path from misc import check_mkdir, crf_refine, AvgMeter, cal_precision_recall_mae, cal_fmeasure from datasets import TestFolder_joint import joint_transforms from model import HSNet_single1, HSNet_single1_ASPP, HSNet_single1_NR, HSNet_single2, SDMS_A, SDMS_C torch.manual_seed(2018) # set which gpu to use torch.cuda.set_device(0) ckpt_path = './ckpt' test_path = './test_ECSSD.csv' def main(): img = np.zeros((512, 512),dtype = np.uint8) img2 = cv2.imread('./0595.PNG', 0) cv2.imshow('img',img2) #cv2.waitKey(0) print(img, img2) Image.fromarray(img).save('./free.png') if __name__ == '__main__': main()
/datasets.py
# Dataset wrappers pairing images with ground-truth masks (and optional edge
# maps / size-class labels) for saliency training and testing.
import os
import os.path

import torch.utils.data as data
from PIL import Image


def _triples_from_frame(label_list):
    """Collect (img_path, gt_path, label) triples from a label DataFrame."""
    return [(row['img_path'], row['gt_path'], row['label'])
            for _, row in label_list.iterrows()]


class ImageFolder_joint(data.Dataset):
    # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.label_list = label_list
        self.imgs = _triples_from_frame(label_list)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.label_list)

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask, label


class ImageFolder_joint_for_edge(data.Dataset):
    # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = _triples_from_frame(label_list)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        # Edge map lives next to the mask: <stem>_edge.<ext>
        pieces = gt_path.split(".")
        edge_path = "." + pieces[1] + "_edge." + pieces[2]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        mask_edge = Image.open(edge_path).convert('L')
        if self.joint_transform is not None:
            if sample.size != mask.size or sample.size != mask_edge.size:
                print("error path:", img_path, gt_path)
                print("size:", sample.size, mask.size, mask_edge.size)
            sample, mask, mask_edge = self.joint_transform(sample, mask, mask_edge)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
            mask_edge = self.target_transform(mask_edge)
        return sample, mask, mask_edge, label

    def __len__(self):
        return len(self.imgs)


class TestFolder_joint(data.Dataset):
    # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = _triples_from_frame(label_list)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        # Also expose the image path so callers can name saved predictions.
        return sample, mask, label, img_path

    def __len__(self):
        return len(self.imgs)


def make_dataset(root):
    """Pair every .jpg in root with its same-stem .png mask."""
    stems = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')]
    return [(os.path.join(root, stem + '.jpg'), os.path.join(root, stem + '.png'))
            for stem in stems]


class ImageFolder(data.Dataset):
    # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
    def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
        self.root = root
        self.imgs = make_dataset(root)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask

    def __len__(self):
        return len(self.imgs)
/infer_SDCNet.py
# Inference/evaluation script for SDCNet: runs a trained snapshot over several
# SOD benchmarks and reports F-measure / MAE, broken down by size-class label
# (0..4) plus an overall row, optionally saving the predicted masks.
import numpy as np
import os
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm

from misc import check_mkdir, AvgMeter, cal_precision_recall_mae, cal_fmeasure, cal_sizec, cal_sc
from datasets import TestFolder_joint
import joint_transforms
from model import R3Net, SDCNet

torch.manual_seed(2021)

# set which gpu to use
torch.cuda.set_device(6)

# the following two args specify the location of the file of trained model (pth extension)
# you should have the pth file in the folder './$ckpt_path$/$exp_name$'
ckpt_path = './ckpt'
exp_name = 'SDCNet'

# Per-benchmark label CSVs (img_path, gt_path, label columns).
msra10k_path = './SOD_label/label_msra10k.csv'
ecssd_path = './SOD_label/label_ECSSD.csv'
dutomrom_path = './SOD_label/label_DUT-OMROM.csv'
dutste_path = './SOD_label/label_DUTS-TE.csv'
hkuis_path = './SOD_label/label_HKU-IS.csv'
pascals_path = './SOD_label/label_PASCAL-S.csv'
sed2_path = './SOD_label/label_SED2.csv'
socval_path = './SOD_label/label_SOC-Val.csv'
sod_path = './SOD_label/label_SOD.csv'
thur15k_path = './SOD_label/label_THUR-15K.csv'

args = {
    'snapshot': '30000',  # your snapshot filename (exclude extension name)
    'save_results': True,  # whether to save the resulting masks
    'test_mode': 1
}

# Augmentations are disabled at test time; images are resized to 300x300.
joint_transform = joint_transforms.Compose([
    #joint_transforms.RandomCrop(300),
    #joint_transforms.RandomHorizontallyFlip(),
    #joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()

to_test = {'ECSSD': ecssd_path, 'SOD': sod_path, 'DUTS-TE': dutste_path}  #{'DUTS-TE': dutste_path,'ECSSD': ecssd_path,'SOD': sod_path, 'SED2': sed2_path, 'PASCAL-S': pascals_path, 'HKU-IS': hkuis_path, 'DUT-OMROM': dutomrom_path}


def main():
    """Load the snapshot, evaluate every benchmark in to_test, print/save results."""
    net = SDCNet(num_classes = 5).cuda()
    print('load snapshot \'%s\' for testing, mode:\'%s\'' % (args['snapshot'], args['test_mode']))
    print(exp_name)
    net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
    net.eval()
    results = {}
    with torch.no_grad():
        for name, root in to_test.items():
            print('load snapshot \'%s\' for testing %s' %(args['snapshot'], name))
            test_data = pd.read_csv(root)
            test_set = TestFolder_joint(test_data, joint_transform, img_transform, target_transform)
            test_loader = DataLoader(test_set, batch_size=1, num_workers=0, shuffle=False)
            # One (precision, recall) AvgMeter per binarisation threshold (256),
            # kept separately per size class 1..5 and overall (index 6).
            # NOTE(review): the *0 meters below are allocated but never updated.
            precision0_record, recall0_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision1_record, recall1_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision2_record, recall2_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision3_record, recall3_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision4_record, recall4_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision5_record, recall5_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision6_record, recall6_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            mae0_record = AvgMeter()
            mae1_record = AvgMeter()
            mae2_record = AvgMeter()
            mae3_record = AvgMeter()
            mae4_record = AvgMeter()
            mae5_record = AvgMeter()
            mae6_record = AvgMeter()
            # n1..n5 count images per size class; n0 counts all images.
            n0, n1, n2, n3, n4, n5 = 0, 0, 0, 0, 0, 0
            if args['save_results']:
                check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (name, args['snapshot'])))
            for i, (inputs, gt, labels, img_path) in enumerate(tqdm(test_loader)):
                shape = gt.size()[2:]  # original mask spatial size (H, W)
                img_var = Variable(inputs).cuda()
                img = np.array(to_pil(img_var.data.squeeze(0).cpu()))
                gt = np.array(to_pil(gt.data.squeeze(0).cpu()))
                # Size-class label fed to the network alongside the image;
                # presumably selects a branch in SDCNet — confirm in model.py.
                sizec = labels.numpy()
                pred2021 = net(img_var, sizec)
                # Upsample the prediction back to the ground-truth resolution.
                pred2021 = F.interpolate(pred2021, size=shape, mode='bilinear', align_corners=True)
                pred2021 = np.array(to_pil(pred2021.data.squeeze(0).cpu()))
                # Accumulate per-size-class metrics (labels are 0..4).
                if labels == 0:
                    precision1, recall1, mae1 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision1, recall1)):
                        p, r = pdata
                        precision1_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall1_record[pidx].update(r)
                    mae1_record.update(mae1)
                    n1 += 1
                elif labels == 1:
                    precision2, recall2, mae2 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision2, recall2)):
                        p, r = pdata
                        precision2_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall2_record[pidx].update(r)
                    mae2_record.update(mae2)
                    n2 += 1
                elif labels == 2:
                    precision3, recall3, mae3 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision3, recall3)):
                        p, r = pdata
                        precision3_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall3_record[pidx].update(r)
                    mae3_record.update(mae3)
                    n3 += 1
                elif labels == 3:
                    precision4, recall4, mae4 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision4, recall4)):
                        p, r = pdata
                        precision4_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall4_record[pidx].update(r)
                    mae4_record.update(mae4)
                    n4 += 1
                elif labels == 4:
                    precision5, recall5, mae5 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision5, recall5)):
                        p, r = pdata
                        precision5_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall5_record[pidx].update(r)
                    mae5_record.update(mae5)
                    n5 += 1
                # Overall metrics, accumulated for every image regardless of class.
                precision6, recall6, mae6 = cal_precision_recall_mae(pred2021, gt)
                for pidx, pdata in enumerate(zip(precision6, recall6)):
                    p, r = pdata
                    precision6_record[pidx].update(p)
                    recall6_record[pidx].update(r)
                mae6_record.update(mae6)
                img_name = os.path.split(str(img_path))[1]
                img_name = os.path.splitext(img_name)[0]
                n0 += 1
                if args['save_results']:
                    Image.fromarray(pred2021).save(os.path.join(ckpt_path, exp_name, '%s_%s' % (
                        name, args['snapshot']), img_name + '_2021.png'))
            # Max F-measure over all thresholds, per class and overall.
            fmeasure1 = cal_fmeasure([precord.avg for precord in precision1_record],
                                     [rrecord.avg for rrecord in recall1_record])
            fmeasure2 = cal_fmeasure([precord.avg for precord in precision2_record],
                                     [rrecord.avg for rrecord in recall2_record])
            fmeasure3 = cal_fmeasure([precord.avg for precord in precision3_record],
                                     [rrecord.avg for rrecord in recall3_record])
            fmeasure4 = cal_fmeasure([precord.avg for precord in precision4_record],
                                     [rrecord.avg for rrecord in recall4_record])
            fmeasure5 = cal_fmeasure([precord.avg for precord in precision5_record],
                                     [rrecord.avg for rrecord in recall5_record])
            fmeasure6 = cal_fmeasure([precord.avg for precord in precision6_record],
                                     [rrecord.avg for rrecord in recall6_record])
            results[name] = {'fmeasure1': fmeasure1, 'mae1': mae1_record.avg,'fmeasure2': fmeasure2, 'mae2': mae2_record.avg,
                             'fmeasure3': fmeasure3, 'mae3': mae3_record.avg, 'fmeasure4': fmeasure4, 'mae4': mae4_record.avg,
                             'fmeasure5': fmeasure5, 'mae5': mae5_record.avg, 'fmeasure6': fmeasure6, 'mae6': mae6_record.avg}
            print('test results:')
            print('[fmeasure1 %.3f], [mae1 %.4f], [class1 %.0f]\n'\
                  '[fmeasure2 %.3f], [mae2 %.4f], [class2 %.0f]\n'\
                  '[fmeasure3 %.3f], [mae3 %.4f], [class3 %.0f]\n'\
                  '[fmeasure4 %.3f], [mae4 %.4f], [class4 %.0f]\n'\
                  '[fmeasure5 %.3f], [mae5 %.4f], [class5 %.0f]\n'\
                  '[fmeasure6 %.3f], [mae6 %.4f], [all %.0f]\n'%\
                  (fmeasure1, mae1_record.avg, n1, fmeasure2, mae2_record.avg, n2,
                   fmeasure3, mae3_record.avg, n3, fmeasure4, mae4_record.avg, n4,
                   fmeasure5, mae5_record.avg, n5, fmeasure6, mae6_record.avg, n0))


def accuracy(y_pred, y_actual, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Returns (accuracy_percentage, sample_count). NOTE(review): only the top-1
    prediction is compared (pred[j]), so values of k beyond 1 in topk only
    widen the topk() call, not the comparison.
    """
    final_acc = 0
    maxk = max(topk)
    # for prob_threshold in np.arange(0, 1, 0.01):
    PRED_COUNT = y_actual.size(0)
    PRED_CORRECT_COUNT = 0
    prob, pred = y_pred.topk(maxk, 1, True, True)
    # prob = np.where(prob > prob_threshold, prob, 0)
    for j in range(pred.size(0)):
        if int(y_actual[j]) == int(pred[j]):
            PRED_CORRECT_COUNT += 1
    if PRED_COUNT == 0:
        final_acc = 0
    else:
        final_acc = float(PRED_CORRECT_COUNT / PRED_COUNT)
    return final_acc * 100, PRED_COUNT


if __name__ == '__main__':
    main()
/misc.py
# Metric and filesystem helpers shared by the training / inference scripts:
# running averages, precision/recall/MAE, F-measure, and size-class estimation.
import numpy as np
import os
#import pydensecrf.densecrf as dcrf


class AvgMeter(object):
    """Tracks the latest value, running sum, count and mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Add `val` with weight `n` observations and refresh the mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def check_mkdir(dir_name):
    """Create `dir_name` if it does not exist (parent directory must exist)."""
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)


def cal_precision_recall_mae(prediction, gt):
    """Compute 256-threshold precision/recall curves and MAE.

    Both arrays must be uint8 of the same shape. Returns
    (precision_list, recall_list, mae), each list having 256 entries.
    """
    # input should be np array with data type uint8
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape

    eps = 1e-4  # avoids division by zero when a threshold yields no positives

    prediction = prediction / 255.
    gt = gt / 255.

    mae = np.mean(np.abs(prediction - gt))

    hard_gt = np.zeros(prediction.shape)
    hard_gt[gt > 0.5] = 1
    t = np.sum(hard_gt)  # t is the number of ground-truth positives

    precision, recall, TPR, FP = [], [], [], []
    # calculating precision and recall at 255 different binarizing thresholds
    for threshold in range(256):
        threshold = threshold / 255.

        hard_prediction = np.zeros(prediction.shape)
        hard_prediction[prediction > threshold] = 1
        #false_pred = np.zeros(prediction.shape)
        #false_prediction[prediction < threshold] = 1
        a = prediction.shape
        tp = np.sum(hard_prediction * hard_gt)
        p = np.sum(hard_prediction)
        # for roc
        #fp = np.sum(false_pred * hard_gt)
        #tpr = (tp + eps)/(a + eps)
        fp = p - tp
        #TPR.append(tpr)
        FP.append(fp)

        precision.append((tp + eps) / (p + eps))
        recall.append((tp + eps) / (t + eps))

    return precision, recall, mae#, TPR, FP


def cal_fmeasure(precision, recall):
    """Return the max F-beta (beta^2 = 0.3) over 256 threshold-wise P/R pairs."""
    assert len(precision) == 256
    assert len(recall) == 256
    beta_square = 0.3
    max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r)
                        for p, r in zip(precision, recall)])
    return max_fmeasure


def cal_sizec(prediction, gt):
    """Estimate the size class (0..4) and best binarization threshold.

    Scans all 256 thresholds, keeps the one with the highest F-measure, and
    classifies the salient-region area ratio at that threshold into 5 bins.
    Returns (sizec, best_threshold).
    """
    # input should be np array with data type uint8
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape

    eps = 1e-4
    #print(gt.shape)
    prediction = prediction / 255.
    gt = gt / 255.

    hard_gt = np.zeros(prediction.shape)
    hard_gt[gt > 0.5] = 1
    t = np.sum(hard_gt)  # number of ground-truth positives

    precision, recall, TPR, FP = [], [], [], []
    # calculating precision and recall at 255 different binarizing thresholds
    best_threshold = 0
    best_F = 0
    for threshold in range(256):
        threshold = threshold / 255.

        gt_size = np.ones(prediction.shape)
        a = np.sum(gt_size)  # total pixel count

        hard_prediction = np.zeros(prediction.shape)
        hard_prediction[prediction > threshold] = 1

        tp = np.sum(hard_prediction * hard_gt)
        p = np.sum(hard_prediction)
        #print(a, p)
        precision = (tp + eps) / (p + eps)
        recall = (tp + eps) / (t + eps)
        beta_square = 0.3
        fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
        if fmeasure > best_F:
            # NOTE(review): indentation reconstructed from a collapsed dump —
            # the size class is taken at the best-F threshold; confirm upstream.
            best_threshold = threshold*255
            best_F = fmeasure
            sm_size = p / a
            if 0 <= sm_size < 0.1:
                sizec = 0
            elif 0.1 <= sm_size < 0.2:
                sizec = 1
            elif 0.2 <= sm_size < 0.3:
                sizec = 2
            elif 0.3 <= sm_size < 0.4:
                sizec = 3
            elif 0.4 <= sm_size <= 1.0:
                sizec = 4

    return sizec, best_threshold#, TPR, FP


def cal_sc(gt):
    """Classify a uint8 ground-truth mask into a size class 0..4 by area ratio."""
    # input should be np array with data type uint8
    assert gt.dtype == np.uint8
    eps = 1e-4

    gt = gt / 255.
    #print(gt.shape)
    img_size = np.ones(gt.shape)
    a = np.sum(img_size)  # total pixel count

    hard_gt = np.zeros(gt.shape)
    hard_gt[gt > 0.5] = 1
    p = np.sum(hard_gt)  # salient pixel count
    b = np.sum(gt)

    sm_size = float(p) / float(a)
    #print(p, a, sm_size, b)
    #print(gt)
    if 0 <= sm_size < 0.1:
        sizec = 0
    elif 0.1 <= sm_size < 0.2:
        sizec = 1
    elif 0.2 <= sm_size < 0.3:
        sizec = 2
    elif 0.3 <= sm_size < 0.4:
        sizec = 3
    elif 0.4 <= sm_size <= 1.0:
        sizec = 4
    return sizec


def pr_cruve(precision, recall):
    """Plot a precision-recall curve. (Misspelled name kept for callers.)"""
    assert len(precision) == 256
    assert len(recall) == 256
    # pylab (matplotlib) is only needed here; importing lazily lets the metric
    # helpers above be used on machines without a plotting backend.
    import pylab as pl
    r = [a[1] for a in zip(precision, recall)]
    p = [a[0] for a in zip(precision, recall)]
    pl.title('PR curve')
    pl.xlabel('Recall')
    pl.ylabel('Precision')  # BUG FIX: was a second pl.xlabel, so the y-axis was never labelled
    pl.plot(r, p)
    pl.show()


# for define the size type of the salient object
def size_aware(gt):
    """Return the fraction of pixels whose ground-truth value exceeds 0.5."""
    assert gt.dtype == np.uint8
    eps = 1e-4
    gt = gt / 255.
    hard_gt = np.zeros(gt.shape)
    hard_gt[gt > 0.5] = 1
    t = np.sum(hard_gt)
    pic = np.size(hard_gt)
    rate = t/pic
    return rate


# NOTE(review): kept disabled because it needs pydensecrf; create_free.py still
# imports crf_refine from this module — re-enable before using that script.
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
#     def _sigmoid(x):
#         return 1 / (1 + np.exp(-x))
#     assert img.dtype == np.uint8
#     assert annos.dtype == np.uint8
#     assert img.shape[:2] == annos.shape
#     # img and annos should be np array with data type uint8
#     EPSILON = 1e-8
#     M = 2  # salient or not
#     tau = 1.05
#     # Setup the CRF model
#     d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
#     anno_norm = annos / 255.
#     n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
#     p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
#     U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
#     U[0, :] = n_energy.flatten()
#     U[1, :] = p_energy.flatten()
#     d.setUnaryEnergy(U)
#     d.addPairwiseGaussian(sxy=3, compat=3)
#     d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
#     # Do the inference
#     infer = np.array(d.inference(1)).astype('float32')
#     res = infer[1, :]
#     res = res * 255
#     res = res.reshape(img.shape[:2])
#     return res.astype('uint8')
/model.py
import torch
import torch.nn.functional as F
from torch import nn

from resnext import ResNeXt101


def _reduce_block(in_channels):
    """Standard feature-reduction head: 3x3 -> 3x3 -> 1x1 convs mapping
    `in_channels` to 256 channels, each followed by BatchNorm + PReLU.

    The layer layout is byte-identical to the previous inline Sequentials,
    so state_dict keys and behavior are unchanged.
    """
    return nn.Sequential(
        nn.Conv2d(in_channels, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
        nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
        nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
    )


def _aspp_reduce(in_channels):
    """Reduction head used on the deepest backbone features: two 3x3 convs
    (to 256 channels) followed by an _ASPP multi-dilation module."""
    return nn.Sequential(
        nn.Conv2d(in_channels, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
        nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
        _ASPP(256)
    )


def _predict_head(in_channels, out_channels=1):
    """Prediction head: two 3x3 convs at 128 channels, then a 1x1 projection
    to `out_channels` logits (1 for saliency maps, 2 for the size branch)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
        nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
        nn.Conv2d(128, out_channels, kernel_size=1)
    )


class R3Net(nn.Module):
    """Saliency network that alternately refines a coarse prediction with
    low-level (layer0-2) and high-level (layer3-4) ResNeXt101 features.

    Training mode returns the 7 intermediate logit maps (for deep
    supervision); eval mode returns the sigmoid of the final map.
    """

    def __init__(self):
        super(R3Net, self).__init__()
        res50 = ResNeXt101()
        self.layer0 = res50.layer0
        self.layer1 = res50.layer1
        self.layer2 = res50.layer2
        self.layer3 = res50.layer3
        self.layer4 = res50.layer4

        # low-level features: layer0 + upsampled layer1/layer2
        self.reduce_low = _reduce_block(64 + 256 + 512)
        # high-level features: layer3 + upsampled layer4, ASPP-fused
        self.reduce_high = _aspp_reduce(1024 + 2048)

        self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
        # each refinement stage sees the previous 1-channel prediction
        # concatenated with a 256-channel feature map (hence 257 inputs)
        self.predict1 = _predict_head(257)
        self.predict2 = _predict_head(257)
        self.predict3 = _predict_head(257)
        self.predict4 = _predict_head(257)
        self.predict5 = _predict_head(257)
        self.predict6 = _predict_head(257)

        for m in self.modules():
            if isinstance(m, (nn.ReLU, nn.Dropout)):
                m.inplace = True

    def forward(self, x, label=None):
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)

        l0_size = layer0.size()[2:]
        reduce_low = self.reduce_low(torch.cat((
            layer0,
            F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True),
            F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1))
        reduce_high = self.reduce_high(torch.cat((
            layer3,
            F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1))
        reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True)

        # residual refinement: alternate between low- and high-level features
        predict0 = self.predict0(reduce_high)
        predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0
        predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1
        predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2
        predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3
        predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4
        predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5

        predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)

        if self.training:
            return predict0, predict1, predict2, predict3, predict4, predict5, predict6
        # F.sigmoid is deprecated (removed in recent PyTorch); torch.sigmoid
        # is numerically identical.
        return torch.sigmoid(predict6)


# --------------------------------------------------------------------------------------------
class SDCNet(nn.Module):
    """Size-divided saliency network.

    `forward(x, c)` takes an image batch and a per-sample size-class index
    `c[i]` in [0, 4] (see `cal_sc`); each size class selects its own
    reduce3_*/reduce2_*/reduce1_*/reduce0_* branch. Training mode returns the
    size-branch maps (p5..p1) plus 11 deeply-supervised saliency maps; eval
    mode returns the sigmoid of the fused final map.
    """

    def __init__(self, num_classes):
        super(SDCNet, self).__init__()
        res50 = ResNeXt101()
        self.layer0 = res50.layer0
        self.layer1 = res50.layer1
        self.layer2 = res50.layer2
        self.layer3 = res50.layer3
        self.layer4 = res50.layer4

        self.reducex = _aspp_reduce(2048)
        self.reduce5 = _reduce_block(64 + 256)
        # fuse F0 (from layer0) with each pyramid feature (256 + 256 channels)
        self.reduce6 = _reduce_block(512)
        self.reduce7 = _reduce_block(512)
        self.reduce8 = _reduce_block(512)
        self.reduce9 = _reduce_block(512)
        self.reduce10 = _reduce_block(512)

        # --------------extra module---------------
        # one branch per size class (0..4) at each decoder level
        self.reduce3_0 = _reduce_block(1024 + 256)
        self.reduce3_1 = _reduce_block(1024 + 256)
        self.reduce3_2 = _reduce_block(1024 + 256)
        self.reduce3_3 = _reduce_block(1024 + 256)
        self.reduce3_4 = _reduce_block(1024 + 256)

        self.reduce2_0 = _reduce_block(512 + 256)
        self.reduce2_1 = _reduce_block(512 + 256)
        self.reduce2_2 = _reduce_block(512 + 256)
        self.reduce2_3 = _reduce_block(512 + 256)
        self.reduce2_4 = _reduce_block(512 + 256)

        self.reduce1_0 = _reduce_block(256 + 256)
        self.reduce1_1 = _reduce_block(256 + 256)
        self.reduce1_2 = _reduce_block(256 + 256)
        self.reduce1_3 = _reduce_block(256 + 256)
        self.reduce1_4 = _reduce_block(256 + 256)

        self.reduce0_0 = _reduce_block(64)
        self.reduce0_1 = _reduce_block(64)
        self.reduce0_2 = _reduce_block(64)
        self.reduce0_3 = _reduce_block(64)
        self.reduce0_4 = _reduce_block(64)

        # saliency prediction heads (1-channel logits)
        self.predict1 = _predict_head(256)
        self.predict2 = _predict_head(256)
        self.predict3 = _predict_head(256)
        self.predict4 = _predict_head(256)
        self.predict5 = _predict_head(256)
        self.predict6 = _predict_head(256)
        self.predict7 = _predict_head(256)
        self.predict8 = _predict_head(256)
        self.predict9 = _predict_head(256)
        self.predict10 = _predict_head(256)

        # size-branch heads (2-channel logits)
        self.pre4 = _predict_head(256, out_channels=2)
        self.pre3 = _predict_head(256, out_channels=2)
        self.pre2 = _predict_head(256, out_channels=2)
        self.pre1 = _predict_head(256, out_channels=2)

        # top-down decoder of the size branch
        self.reducex_1 = _reduce_block(256 + 256)
        self.reducex_2 = _reduce_block(512 + 256)
        self.reducex_3 = _reduce_block(1024 + 256)

        for m in self.modules():
            if isinstance(m, (nn.ReLU, nn.Dropout)):
                m.inplace = True

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc0 = nn.Sequential(
            nn.BatchNorm1d(256),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes),
        )

    def forward(self, x, c):
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)

        l0_size = layer0.size()[2:]
        l1_size = layer1.size()[2:]
        l2_size = layer2.size()[2:]
        l3_size = layer3.size()[2:]

        # ---- size-classification branch (top-down) ----
        F1 = self.reducex(layer4)
        p4 = self.pre4(F1)
        p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
        F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1))
        p3 = self.pre3(F0_3)
        p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True)
        F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1))
        p2 = self.pre2(F0_2)
        p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True)
        F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1))
        p1 = self.pre1(F0_1)
        p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True)
        p5 = p4 + p3 + p2 + p1

        # ---- saliency detection branch ----
        predict1 = self.predict1(F1)
        predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True)
        F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)

        # For every sample, route through the branch of its size class.
        # Samples whose class is outside [0, 4] keep the detached copy,
        # matching the original if/elif chains.
        heads3 = (self.reduce3_0, self.reduce3_1, self.reduce3_2,
                  self.reduce3_3, self.reduce3_4)
        F2 = F1.clone().detach()
        for i in range(len(c)):
            k = int(c[i])
            if 0 <= k <= 4:
                F2[i, :, :, :] = heads3[k](
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
        predict2 = self.predict2(F2) + predict1
        predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True)
        F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True)

        heads2 = (self.reduce2_0, self.reduce2_1, self.reduce2_2,
                  self.reduce2_3, self.reduce2_4)
        F3 = F2.clone().detach()
        for i in range(len(c)):
            k = int(c[i])
            if 0 <= k <= 4:
                F3[i, :, :, :] = heads2[k](
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
        predict3 = self.predict3(F3) + predict2
        predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True)
        F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True)

        heads1 = (self.reduce1_0, self.reduce1_1, self.reduce1_2,
                  self.reduce1_3, self.reduce1_4)
        F4 = F3.clone().detach()
        for i in range(len(c)):
            k = int(c[i])
            if 0 <= k <= 4:
                F4[i, :, :, :] = heads1[k](
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
        predict4 = self.predict4(F4) + predict3

        F5 = self.reduce5(torch.cat((F4, layer0), 1))
        predict5 = self.predict5(F5) + predict4

        heads0 = (self.reduce0_0, self.reduce0_1, self.reduce0_2,
                  self.reduce0_3, self.reduce0_4)
        F0 = F4.clone().detach()
        for i in range(len(c)):
            k = int(c[i])
            if 0 <= k <= 4:
                F0[i, :, :, :] = heads0[k](layer0[i, :, :, :].unsqueeze(0))

        F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True)
        F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True)
        F6 = self.reduce6(torch.cat((F0, F5), 1))
        F7 = self.reduce7(torch.cat((F0, F4), 1))
        F8 = self.reduce8(torch.cat((F0, F3), 1))
        F9 = self.reduce9(torch.cat((F0, F2), 1))
        F10 = self.reduce10(torch.cat((F0, F1), 1))
        predict6 = self.predict6(F6) + predict5
        predict7 = self.predict7(F7) + predict6
        predict8 = self.predict8(F8) + predict7
        predict9 = self.predict9(F9) + predict8
        predict10 = self.predict10(F10) + predict9
        predict11 = predict6 + predict7 + predict8 + predict9 + predict10

        def _up(t):
            # restore full input resolution
            return F.interpolate(t, size=x.size()[2:], mode='bilinear', align_corners=True)

        predict1 = _up(predict1)
        predict2 = _up(predict2)
        predict3 = _up(predict3)
        predict4 = _up(predict4)
        predict5 = _up(predict5)
        predict6 = _up(predict6)
        predict7 = _up(predict7)
        predict8 = _up(predict8)
        predict9 = _up(predict9)
        predict10 = _up(predict10)
        predict11 = _up(predict11)

        if self.training:
            return (p5, p4, p3, p2, p1,
                    predict1, predict2, predict3, predict4, predict5,
                    predict6, predict7, predict8, predict9, predict10, predict11)
        # F.sigmoid is deprecated (removed in recent PyTorch); torch.sigmoid
        # is numerically identical.
        return torch.sigmoid(predict11)


# ----------------------------------------------------------------------------------------
class _ASPP(nn.Module):
    """Atrous spatial pyramid pooling: parallel 1x1 / dilated-3x3 (d=2,4,6)
    convs plus a global-average-pooled 1x1 branch, fused back to `in_dim`."""

    def __init__(self, in_dim):
        super(_ASPP, self).__init__()
        down_dim = in_dim // 2
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2),
            nn.BatchNorm2d(down_dim), nn.PReLU()
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4),
            nn.BatchNorm2d(down_dim), nn.PReLU()
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6),
            nn.BatchNorm2d(down_dim), nn.PReLU()
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
        )
        self.fuse = nn.Sequential(
            nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU()
        )

    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(x)
        conv3 = self.conv3(x)
        conv4 = self.conv4(x)
        # image-level context: pool to 1x1, convolve, then upsample back
        conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:],
                              mode='bilinear', align_corners=True)
        return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1))
/model/backbones/resnet.py
import math

import torch
from torch import nn


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet-18/34 style)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection for the identity path when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    """Standard 1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 style)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out


class GDN_Bottleneck(nn.Module):
    """Bottleneck with group-wise normalization before each affine BN.

    During training each conv output is normalized per sub-batch slice
    ([:8], [8:16], [16:]) by a stat-free BatchNorm (affine=False,
    track_running_stats=False); at test time InstanceNorm is used instead.
    NOTE(review): the slicing assumes batches are laid out as fixed groups of
    8 samples (e.g. per domain) — TODO confirm against the data loader.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(GDN_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        # bn1_0 has no affine params and no running stats: it only whitens
        # the slice it is applied to, so one module can serve all slices.
        self.bn1_0 = nn.BatchNorm2d(
            planes, affine=False, track_running_stats=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2_0 = nn.BatchNorm2d(
            planes, affine=False, track_running_stats=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3_0 = nn.BatchNorm2d(
            planes * 4, affine=False, track_running_stats=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        # Test-time replacements for the group-wise whitening.
        self.in1 = nn.InstanceNorm2d(planes)
        self.in2 = nn.InstanceNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out1 = torch.zeros_like(out)
        if self.training == True:
            #print("training with gdn block")
            out1[:8] = self.bn1_0(out[:8])
            out1[8:16] = self.bn1_0(out[8:16])
            out1[16:] = self.bn1_0(out[16:])
        else:
            #print("test for gdn block")
            out1 = self.in1(out)
        out = self.bn1(out1)
        out = self.relu(out)

        out = self.conv2(out)
        out1 = torch.zeros_like(out)
        if self.training == True:
            out1[:8] = self.bn2_0(out[:8])
            out1[8:16] = self.bn2_0(out[8:16])
            out1[16:] = self.bn2_0(out[16:])
        else:
            out1 = self.in1(out)
        out = self.bn2(out1)
        out = self.relu(out)

        out = self.conv3(out)
        out1 = torch.zeros_like(out)
        if self.training == True:
            out1[:8] = self.bn3_0(out[:8])
            out1[8:16] = self.bn3_0(out[8:16])
            out1[16:] = self.bn3_0(out[16:])
        else:
            # planes*4 channels here, hence the wider InstanceNorm.
            out1 = self.in2(out)
        out = self.bn3(out1)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out


class IN_Bottleneck(nn.Module):
    """Bottleneck that inserts InstanceNorm before each BatchNorm."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(IN_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # (continuation of IN_Bottleneck.__init__ from the previous chunk)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        # Each stage: conv -> InstanceNorm -> BatchNorm (-> ReLU).
        out = self.conv1(x)
        out = self.in1_0(out)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.in2_0(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.in3_0(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out


class IN2_Bottleneck(nn.Module):
    """Bottleneck where each stage fuses its IN-normalized map back with the
    raw conv output via a learned conv over their concatenation."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(IN2_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        # Fuses cat(normalized, raw) (2*planes) back down to planes.
        self.conv1_1 = nn.Sequential(
            nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True)
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.conv2_1 = nn.Sequential(
            nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True)
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.conv3_1 = nn.Sequential(
            nn.Conv2d(planes * 8, planes * 4, kernel_size=1, bias=False),
            nn.BatchNorm2d(planes * 4)
        )
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        x1 = self.conv1(x)
        out1 = self.in1_0(x1)
        out1 = self.bn1(out1)
        out1 = self.relu(out1)
        x1 = self.conv1_1(torch.cat((out1, x1), 1))

        x2 = self.conv2(x1)
        out2 = self.in2_0(x2)
        out2 = self.bn2(out2)
        out2 = self.relu(out2)
        x2 = self.conv2_1(torch.cat((out2, x2), 1))

        x3 = self.conv3(x2)
        out3 = self.in3_0(x3)
        out3 = self.bn3(out3)
        out3 = self.relu(out3)
        x3 = self.conv3_1(torch.cat((out3, x3), 1))

        if self.downsample is not None:
            residual = self.downsample(residual)

        x3 += residual
        x3 = self.relu(x3)
        return x3


class SNR_Bottleneck(nn.Module):
    """SNR-style bottleneck: split each stage into an IN-normalized part and a
    residual (style) part res = x - IN(x), refine the residual with a conv,
    then add the two back together."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        # NOTE(review): in3_0 is declared but never used in forward().
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        x1 = self.conv1(x)
        out1 = self.in1_0(x1)
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        # NOTE(review): the bn1 result is immediately overwritten by the next
        # assignment, so self.bn1 has no effect here — looks unintentional;
        # confirm against the reference SNR implementation.
        x1 = self.bn1(x1)
        x1 = out1 + res1
        x1 = self.relu(x1)

        x2 = self.conv2(x1)
        out2 = self.in2_0(x2)
        res2 = x2 - out2
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        # NOTE(review): same dead-store pattern as bn1 above.
        x2 = self.bn2(x2)
        x2 = out2 + res2
        x2 = self.relu(x2)

        x3 = self.conv3(x2)
        x3 = self.bn3(x3)

        if self.downsample is not None:
            residual = self.downsample(residual)

        x3 += residual
        x3 = self.relu(x3)
        return x3


class SNR2_Bottleneck(nn.Module):
    """SNR variant that propagates the stage-1 style residual into stage 2
    (res2 = x2 - IN(x2) + res1), max-pooling res1 when the stage strides."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR2_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        # NOTE(review): in3_0 is declared but never used in forward().
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Used to downsample res1 so it matches the strided stage-2 size.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)

    def forward(self, x):
        residual = x

        x1 = self.conv1(x)
        out1 = self.in1_0(x1)
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        x1 = out1 + res1
        x1 = self.bn1(x1)
        x1 = self.relu(x1)

        x2 = self.conv2(x1)
        out2 = self.in2_0(x2)
        if self.stride == 2:
            res1 = self.maxpool(res1)
        res2 = x2 - out2 + res1
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        x2 = out2 + res2
        x2 = self.bn2(x2)
        x2 = self.relu(x2)

        x3 = self.conv3(x2)
        x3 = self.bn3(x3)

        if self.downsample is not None:
            residual = self.downsample(residual)

        x3 += residual
        x3 = self.relu(x3)
        return x3


class SNR3_Bottleneck(nn.Module):
    """SNR variant returning intermediate features and style residuals.

    forward() returns (x3, x2, x1, res2, res1) so a wrapper network can
    supervise/accumulate them across blocks; it also accepts a tuple input
    carrying accumulators from the previous block.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR3_Bottleneck, self).__init__()
        # Shared InstanceNorm used for both stage 1 and stage 2 (same channel
        # count; IN without affine params is channel-count agnostic anyway).
        self.in1 = nn.InstanceNorm2d(planes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        # NOTE(review): in3 is declared but never used in forward().
        self.in3 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Aligns x1/res1 with the strided outputs before returning them.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)

    def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
        # When chained inside nn.Sequential the previous block's 5-tuple
        # arrives as a single argument — unpack it here.
        if type(x) is tuple:
            # print(len(x))
            x_2 = x[1]
            x_1 = x[2]
            r2 = x[3]
            r1 = x[4]
            x = x[0]
        residual = x

        x1 = self.conv1(x)
        out1 = self.in1(x1)
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        # print(out1.shape)
        # print(res1.shape)
        # print(x1.shape)
        x1 = out1 + res1
        x1 = self.bn1(x1)
        x1 = self.relu(x1)

        x2 = self.conv2(x1)
        out2 = self.in1(x2)
        res2 = x2 - out2
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        x2 = out2 + res2
        x2 = self.bn2(x2)
        x2 = self.relu(x2)

        x3 = self.conv3(x2)
        x3 = self.bn3(x3)

        if self.downsample is not None:
            residual = self.downsample(residual)

        x3 += residual
        x3 = self.relu(x3)

        # Accumulate features/residuals handed over from the previous block.
        if x_2 is not None:
            x2 = x2 + x_2
        if x_1 is not None:
            x1 = x1 + x_1
        if r2 is not None:
            res2 = res2 + r2
        if r1 is not None:
            res1 = res1 + r1
        '''
        print(x3.shape)
        print(x2.shape)
        print(x1.shape)
        print(res2.shape)
        print(res1.shape)
        '''
        if self.stride == 2:
            x1 = self.maxpool(x1)
            res1 = self.maxpool(res1)
        return x3, x2, x1, res2, res1


class SNR4_Bottleneck(nn.Module):
    """Same structure and forward() contract as SNR3_Bottleneck.

    NOTE(review): as visible here this class is an exact duplicate of
    SNR3_Bottleneck — presumably kept as a separate name for experimentation.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR4_Bottleneck, self).__init__()
        self.in1 = nn.InstanceNorm2d(planes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
        # (continuation of SNR4_Bottleneck.__init__ from the previous chunk)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        # NOTE(review): in3 is declared but never used in forward().
        self.in3 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)

    def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
        # Unpack the 5-tuple handed over by the previous block in a Sequential.
        if type(x) is tuple:
            # print(len(x))
            x_2 = x[1]
            x_1 = x[2]
            r2 = x[3]
            r1 = x[4]
            x = x[0]
        residual = x

        x1 = self.conv1(x)
        out1 = self.in1(x1)
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        # print(out1.shape)
        # print(res1.shape)
        # print(x1.shape)
        x1 = out1 + res1
        x1 = self.bn1(x1)
        x1 = self.relu(x1)

        x2 = self.conv2(x1)
        out2 = self.in1(x2)
        res2 = x2 - out2
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        x2 = out2 + res2
        x2 = self.bn2(x2)
        x2 = self.relu(x2)

        x3 = self.conv3(x2)
        x3 = self.bn3(x3)

        if self.downsample is not None:
            residual = self.downsample(residual)

        x3 += residual
        x3 = self.relu(x3)

        if x_2 is not None:
            x2 = x2 + x_2
        if x_1 is not None:
            x1 = x1 + x_1
        if r2 is not None:
            res2 = res2 + r2
        if r1 is not None:
            res1 = res1 + r1
        '''
        print(x3.shape)
        print(x2.shape)
        print(x1.shape)
        print(res2.shape)
        print(res1.shape)
        '''
        if self.stride == 2:
            x1 = self.maxpool(x1)
            res1 = self.maxpool(res1)
        return x3, x2, x1, res2, res1


# --------------------------------- resnet-----------------------------------
class ResNet(nn.Module):
    """Plain ResNet trunk (stem + 4 stages), returning the stage-4 feature map.

    NOTE(review): the stem ReLU is commented out and the stem pools with
    kernel_size=2 (torchvision uses 3/stride 2) — deliberate per the inline
    comments, but worth confirming against the pretrained weights used.
    """

    def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1,
                 layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # self.relu = nn.ReLU(inplace=True)   # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # last_stride=1 keeps stage-4 resolution (common in re-ID models).
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`; first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Put the stem and the first `frozen_stages` stages in eval mode and
        stop their gradients."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            print('layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x, camid=None):
        # camid is accepted for interface parity with other backbones; unused.
        x = self.conv1(x)
        x = self.bn1(x)
        # x = self.relu(x)    # add missed relu
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping fc.* entries."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])

    def random_init(self):
        """He-style init for convs; unit-gamma/zero-beta for BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


# ---------------------------------Comb resnet-----------------------------------
class Comb_ResNet(nn.Module):
    """ResNet trunk that, between stages, concatenates an IN+BN+ReLU view of
    the feature with the feature itself and fuses them with a small conv
    stack before the next stage."""

    def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1,
                 layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.in1 = nn.InstanceNorm2d(64)
        self.bn1_1 = nn.BatchNorm2d(64)
        # Fusion after the stem: cat(IN-view, raw) 128 -> 64.
        self.conv2 = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=1)
        )
        self.in2 = nn.InstanceNorm2d(256)
        self.bn2_1 = nn.BatchNorm2d(256)
        # Fusion after layer1: 512 -> 256.
        self.conv3 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=1)
        )
        self.in3 = nn.InstanceNorm2d(512)
        self.bn3_1 = nn.BatchNorm2d(512)
        # Fusion after layer2: 1024 -> 512.
        self.conv4 = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=1)
        )
        self.in4 = nn.InstanceNorm2d(1024)
        self.bn4_1 = nn.BatchNorm2d(1024)
        # Fusion after layer3: 2048 -> 1024.
        self.conv5 = nn.Sequential(
            nn.Conv2d(2048, 1024, kernel_size=3, padding=1),
            nn.BatchNorm2d(1024),
            nn.ReLU(),
            nn.Conv2d(1024, 1024, kernel_size=3, padding=1),
            nn.BatchNorm2d(1024),
            nn.ReLU(),
            nn.Conv2d(1024, 1024, kernel_size=1)
        )
        self.relu = nn.ReLU(inplace=True)   # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`; first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Put the stem and the first `frozen_stages` stages in eval mode and
        stop their gradients."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            print('layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x, camid=None):
        x = self.conv1(x)
        x = self.bn1(x)
        # x = self.relu(x)    # add missed relu
        x = self.maxpool(x)

        # Per-stage pattern: IN -> BN -> ReLU view, concat with raw, fuse.
        xin = self.in1(x)
        xin = self.bn1_1(xin)
        xin = self.relu(xin)
        x = self.conv2(torch.cat((xin, x), 1))
        x = self.layer1(x)
        xin = self.in2(x)
        xin = self.bn2_1(xin)
        xin = self.relu(xin)
        x = self.conv3(torch.cat((xin, x), 1))
        x = self.layer2(x)
        xin = self.in3(x)
        xin = self.bn3_1(xin)
        xin = self.relu(xin)
        x = self.conv4(torch.cat((xin, x), 1))
        x = self.layer3(x)
        xin = self.in4(x)
        xin = self.bn4_1(xin)
        xin = self.relu(xin)
        x = self.conv5(torch.cat((xin, x), 1))
        x = self.layer4(x)
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping fc.* entries."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])

    def random_init(self):
        """He-style init for convs; unit-gamma/zero-beta for BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


# ---------------------------------Pure resnet-----------------------------------
class Pure_ResNet(nn.Module):
    """ResNet trunk identical in structure to ResNet, intended for blocks
    whose forward may return tuples (the tuple-unpacking branch is currently
    disabled via `if False:`)."""

    def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1,
                 layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # self.relu = nn.ReLU(inplace=True)   # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`; first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Put the stem and the first `frozen_stages` stages in eval mode and
        stop their gradients."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            print('layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x, camid=None):
        x = self.conv1(x)
        x = self.bn1(x)
        #print(camid)
        # x = self.relu(x)    # add missed relu
        x = self.maxpool(x)

        # NOTE(review): dead branch kept for blocks that return 5-tuples
        # (e.g. SNR3/SNR4) — flip the condition to re-enable.
        if False:
            x, _, _, _, _ = self.layer1(x)
            x, _, _, _, _ = self.layer2(x)
            x, _, _, _, _ = self.layer3(x)
            x, _, _, _, _ = self.layer4(x)
        else:
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
        return x

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping fc.* entries."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])

    def random_init(self):
        """He-style init for convs; unit-gamma/zero-beta for BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


# ---------------------------------jointin resnet-----------------------------------
class Jointin_ResNet(nn.Module):
    """Trunk for SNR3-style blocks: every stage returns (x3, x2, x1, res2,
    res1); training mode exposes all 20 tensors for auxiliary losses."""

    def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1,
                 layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        # NOTE(review): conv1_1/bn1_1 belong to the stem residual path that is
        # currently commented out in forward(); they are unused at present.
        self.conv1_1 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,
                                 bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn1_1 = nn.BatchNorm2d(64)
        self.in1 = nn.InstanceNorm2d(64)
        # self.relu = nn.ReLU(inplace=True)   # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`; first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Put the stem and the first `frozen_stages` stages in eval mode and
        stop their gradients."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            print('layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x, camid=None):
        x = self.conv1(x)
        x0 = self.in1(x)
        '''
        res0 = x - x0
        res0 = self.conv1_1(res0)
        res0 = self.bn1_1(res0)
        x0 = x0 + res0
        '''
        x0 = self.bn1(x0)
        # x = self.relu(x)    # add missed relu
        x0 = self.maxpool(x0)

        # Each layer returns (x_3, x_2, x_1, res_2, res_1); only x_3 feeds on.
        x1_3, x1_2, x1_1, res1_2, res1_1 = self.layer1(x0)
        x2_3, x2_2, x2_1, res2_2, res2_1 = self.layer2(x1_3)
        x3_3, x3_2, x3_1, res3_2, res3_1 = self.layer3(x2_3)
        x4_3, x4_2, x4_1, res4_2, res4_1 = self.layer4(x3_3)
        if self.training:
            # Expose every stage's features and style residuals for training.
            return x4_3, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1
        else:
            return x4_3

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping fc.* entries."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])

    def random_init(self):
        """He-style init for convs; unit-gamma/zero-beta for BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


# ---------------------------------jointout resnet-----------------------------------
class Jointout_ResNet(nn.Module):
    """Trunk that applies an SNR-style IN/residual split *between* stages
    (rather than inside blocks); training mode returns the per-stage features
    and style residuals (x0..x4, res0..res4)."""

    def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1,
                 layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        # Refines the stem's style residual (x - IN(x)).
        self.conv1_res = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=1)
        )
        self.in1 = nn.InstanceNorm2d(64)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn1_1 = nn.BatchNorm2d(64)
        self.in2 = nn.InstanceNorm2d(256)
        self.bn2_1 = nn.BatchNorm2d(256)
        self.bn2_0 = nn.BatchNorm2d(256)
        self.in3 = nn.InstanceNorm2d(512)
        self.bn3_1 = nn.BatchNorm2d(512)
        self.bn3_0 = nn.BatchNorm2d(512)
        self.in4 = nn.InstanceNorm2d(1024)
        self.bn4_1 = nn.BatchNorm2d(1024)
        self.bn4_0 = nn.BatchNorm2d(1024)
        self.in5 = nn.InstanceNorm2d(2048)
        self.bn5_1 = nn.BatchNorm2d(2048)
        self.bn5_0 = nn.BatchNorm2d(2048)
        self.relu = nn.ReLU(inplace=True)   # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        # conv{N} pre-projects a stage output; conv{N}_res refines its
        # style residual through a bottlenecked conv stack.
        self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv2_res = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 256, kernel_size=1)
        )
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.conv3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv3_res = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 512, kernel_size=1)
        )
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.conv4 = nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv4_res = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 1024, kernel_size=1)
        )
        self.layer4 = self._make_layer(block, 512, layers[3],
                                       stride=last_stride)
        self.conv5 = nn.Conv2d(2048, 2048, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv5_res = nn.Sequential(
            nn.Conv2d(2048, 1024, kernel_size=3, padding=1),
            nn.BatchNorm2d(1024),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, kernel_size=3, padding=1),
            nn.BatchNorm2d(1024),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 2048, kernel_size=1)
        )

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`; first one may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Put the stem and the first `frozen_stages` stages in eval mode and
        stop their gradients."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, self.frozen_stages + 1):
            m = getattr(self, 'layer{}'.format(i))
            print('layer{}'.format(i))
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x, camid=None):
        # Stem: split into IN-normalized content x0 and style residual res0,
        # refine the residual, then recombine.
        x = self.conv1(x)
        x0 = self.in1(x)
        res0 = x - x0
        x0 = self.bn1(x0)
        x0 = self.relu(x0)
        res0 = self.conv1_res(res0)
        x0 = x0 + res0
        x0 = self.bn1_1(x0)
        # x = self.relu(x)    # add missed relu
        x0 = self.maxpool(x0)

        # Same split/refine/recombine pattern after each residual stage.
        x1 = self.layer1(x0)
        px1 = self.conv2(x1)
        x1 = self.in2(px1)
        res1 = px1 - x1
        x1 = self.bn2_0(x1)
        x1 = self.relu(x1)
        res1 = self.conv2_res(res1)
        x1 = x1 + res1
        x1 = self.bn2_1(x1)
        x1 = self.relu(x1)

        x2 = self.layer2(x1)
        px2 = self.conv3(x2)
        x2 = self.in3(px2)
        res2 = px2 - x2
        x2 = self.bn3_0(x2)
        x2 = self.relu(x2)
        res2 = self.conv3_res(res2)
        x2 = x2 + res2
        x2 = self.bn3_1(x2)
        x2 = self.relu(x2)

        x3 = self.layer3(x2)
        px3 = self.conv4(x3)
        x3 = self.in4(px3)
        res3 = px3 - x3
        x3 = self.bn4_0(x3)
        x3 = self.relu(x3)
        res3 = self.conv4_res(res3)
        x3 = x3 + res3
        x3 = self.bn4_1(x3)
        x3 = self.relu(x3)

        x4 = self.layer4(x3)
        px4 = self.conv5(x4)
        x4 = self.in5(px4)
        res4 = px4 - x4
        x4 = self.bn5_0(x4)
        x4 = self.relu(x4)
        res4 = self.conv5_res(res4)
        x4 = x4 + res4
        x4 = self.bn5_1(x4)
        x4 = self.relu(x4)

        if self.training:
            # Features and style residuals of every stage for auxiliary losses.
            return x0, x1, x2, x3, x4, res0, res1, res2, res3, res4
        else:
            return x4

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping fc.* entries."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])

    def random_init(self):
        """He-style init for convs; unit-gamma/zero-beta for BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
/model/make_model.py
import torch import torch.nn as nn from .backbones.resnet import ResNet, Comb_ResNet, Pure_ResNet, Jointin_ResNet, Jointout_ResNet, BasicBlock, Bottleneck, GDN_Bottleneck, IN_Bottleneck, IN2_Bottleneck, SNR_Bottleneck, SNR2_Bottleneck, SNR3_Bottleneck from loss.arcface import ArcFace from .backbones.resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a from .backbones.se_resnet_ibn_a import se_resnet50_ibn_a, se_resnet101_ibn_a import torch.nn.functional as F model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out') nn.init.constant_(m.bias, 0.0) elif classname.find('Conv') != -1: nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') if m.bias is not None: nn.init.constant_(m.bias, 0.0) elif classname.find('BatchNorm') != -1: if m.affine: nn.init.constant_(m.weight, 1.0) nn.init.constant_(m.bias, 0.0) def weights_init_classifier(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: nn.init.normal_(m.weight, std=0.001) if m.bias: nn.init.constant_(m.bias, 0.0) class Backbone(nn.Module): def __init__(self, num_classes, cfg): super(Backbone, self).__init__() last_stride = cfg.MODEL.LAST_STRIDE model_path = cfg.MODEL.PRETRAIN_PATH model_name = cfg.MODEL.NAME self.model_name = cfg.MODEL.NAME pretrain_choice = cfg.MODEL.PRETRAIN_CHOICE #block = cfg.MODEL.BLOCK self.cos_layer = cfg.MODEL.COS_LAYER self.neck = cfg.MODEL.NECK self.neck_feat = cfg.TEST.NECK_FEAT if model_name == 'Pure_resnet50_GDN': self.in_planes = 2048 self.base = ResNet(last_stride=last_stride, block=GDN_Bottleneck, 
frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Comb_resnet50_IN': self.in_planes = 2048 self.base = Comb_ResNet(last_stride=last_stride, block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Pure_resnet50_IN2': self.in_planes = 2048 self.base = Pure_ResNet(last_stride=last_stride, block=IN2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # elif model_name == 'Pure_resnet50_IN': self.in_planes = 2048 self.base = Pure_ResNet(last_stride=last_stride, block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Pure_resnet50_SNR': self.in_planes = 2048 self.base = Pure_ResNet(last_stride=last_stride, block=SNR_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Pure_resnet50_SNR2': self.in_planes = 2048 self.base = Pure_ResNet(last_stride=last_stride, block=SNR2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Jointin_resnet50_SNR3': self.in_planes = 2048 self.base = Jointin_ResNet(last_stride=last_stride, block=SNR3_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Jointout_resnet50_None': self.in_planes = 2048 self.base = Jointout_ResNet(last_stride=last_stride, block=Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') print(self.base) elif model_name == 'Jointout_resnet50_IN': self.in_planes = 2048 self.base = Jointout_ResNet(last_stride=last_stride, block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) # print('using resnet50 as a backbone') 
print(self.base) elif model_name == 'resnet18': self.in_planes = 512 self.base = ResNet(last_stride=last_stride, block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN, layers=[2, 2, 2, 2]) print('using resnet18 as a backbone') elif model_name == 'resnet34': self.in_planes = 512 self.base = ResNet(last_stride=last_stride, block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN, layers=[3, 4, 6, 3]) print('using resnet34 as a backbone') elif model_name == 'resnet50_ibn_a': self.in_planes = 2048 self.base = resnet50_ibn_a(last_stride) print('using se_resnet50_ibn_a as a backbone') elif model_name == 'se_resnet50_ibn_a': self.in_planes = 2048 self.base = se_resnet50_ibn_a( last_stride, frozen_stages=cfg.MODEL.FROZEN) print('using se_resnet50_ibn_a as a backbone') elif model_name == 'resnet101_ibn_a': self.in_planes = 2048 self.base = resnet101_ibn_a( last_stride, frozen_stages=cfg.MODEL.FROZEN) print('using resnet101_ibn_a as a backbone') elif model_name == 'se_resnet101_ibn_a': self.in_planes = 2048 self.base = se_resnet101_ibn_a( last_stride, frozen_stages=cfg.MODEL.FROZEN) print('using se_resnet101_ibn_a as a backbone') else: print('unsupported backbone! 
but got {}'.format(model_name)) if pretrain_choice == 'imagenet': self.base.load_param(model_path) print('Loading pretrained ImageNet model......from {}'.format(model_path)) self.gap = nn.AdaptiveAvgPool2d(1) self.num_classes = num_classes if self.cos_layer: print('using cosine layer') self.arcface = ArcFace( self.in_planes, self.num_classes, s=30.0, m=0.50) else: self.classifier = nn.Linear( self.in_planes, self.num_classes, bias=False) self.classifier.apply(weights_init_classifier) if model_name == 'Jointin_resnet50_SNR3': self.classifier = nn.Linear( self.in_planes, self.num_classes, bias=False) self.classifier.apply(weights_init_classifier) self.classifier1 = nn.Linear(512, self.num_classes, bias=False) self.classifier1.apply(weights_init_classifier) self.classifier2 = nn.Linear(512, self.num_classes, bias=False) self.classifier2.apply(weights_init_classifier) self.classifier3 = nn.Linear(512, self.num_classes, bias=False) self.classifier3.apply(weights_init_classifier) self.classifier4 = nn.Linear(512, self.num_classes, bias=False) self.classifier4.apply(weights_init_classifier) self.classifier5 = nn.Linear(1024, self.num_classes, bias=False) self.classifier5.apply(weights_init_classifier) self.classifier6 = nn.Linear(256, self.num_classes, bias=False) self.classifier6.apply(weights_init_classifier) self.classifier7 = nn.Linear(256, self.num_classes, bias=False) self.classifier7.apply(weights_init_classifier) self.classifier8 = nn.Linear(256, self.num_classes, bias=False) self.classifier8.apply(weights_init_classifier) self.classifier9 = nn.Linear(256, self.num_classes, bias=False) self.classifier9.apply(weights_init_classifier) self.classifier10 = nn.Linear(512, self.num_classes, bias=False) self.classifier10.apply(weights_init_classifier) self.classifier11 = nn.Linear(128, self.num_classes, bias=False) self.classifier11.apply(weights_init_classifier) self.classifier12 = nn.Linear(128, self.num_classes, bias=False) 
self.classifier12.apply(weights_init_classifier) self.classifier13 = nn.Linear(128, self.num_classes, bias=False) self.classifier13.apply(weights_init_classifier) self.classifier14 = nn.Linear(128, self.num_classes, bias=False) self.classifier14.apply(weights_init_classifier) self.classifier15 = nn.Linear(256, self.num_classes, bias=False) self.classifier15.apply(weights_init_classifier) self.classifier16 = nn.Linear(64, self.num_classes, bias=False) self.classifier16.apply(weights_init_classifier) self.classifier17 = nn.Linear(64, self.num_classes, bias=False) self.classifier17.apply(weights_init_classifier) self.classifier18 = nn.Linear(64, self.num_classes, bias=False) self.classifier18.apply(weights_init_classifier) self.classifier19 = nn.Linear(64, self.num_classes, bias=False) self.classifier19.apply(weights_init_classifier) elif 'Jointout' in model_name: self.classifier0 = nn.Linear(64, self.num_classes, bias=False) self.classifier0.apply(weights_init_classifier) self.classifier0_1 = nn.Linear(64, self.num_classes, bias=False) self.classifier0_1.apply(weights_init_classifier) self.classifier1 = nn.Linear(256, self.num_classes, bias=False) self.classifier1.apply(weights_init_classifier) self.classifier1_1 = nn.Linear(256, self.num_classes, bias=False) self.classifier1_1.apply(weights_init_classifier) self.classifier2 = nn.Linear(512, self.num_classes, bias=False) self.classifier2.apply(weights_init_classifier) self.classifier2_1 = nn.Linear(512, self.num_classes, bias=False) self.classifier2_1.apply(weights_init_classifier) self.classifier3 = nn.Linear(1024, self.num_classes, bias=False) self.classifier3.apply(weights_init_classifier) self.classifier3_1 = nn.Linear(1024, self.num_classes, bias=False) self.classifier3_1.apply(weights_init_classifier) self.classifier4 = nn.Linear(2048, self.num_classes, bias=False) self.classifier4.apply(weights_init_classifier) self.classifier4_1 = nn.Linear(2048, self.num_classes, bias=False) 
self.classifier4_1.apply(weights_init_classifier) self.bottleneck = nn.BatchNorm1d(self.in_planes) self.bottleneck.bias.requires_grad_(False) self.bottleneck.apply(weights_init_kaiming) def forward(self, x, label=None, camid=None): # label is unused if self.cos_layer == 'no' if self.training and self.model_name == 'Jointin_resnet50_SNR3': x, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1 = self.base(x, camid) global_feat = nn.functional.avg_pool2d(x, x.shape[2:4]) global_feat = global_feat.view(global_feat.shape[0], -1) feat = self.bottleneck(global_feat) cls_score = self.classifier(feat) fx4_2 = nn.functional.avg_pool2d(x4_2, x4_2.shape[2:4]) fx4_2 = fx4_2.view(fx4_2.shape[0], -1) ax4_2 = self.classifier1(fx4_2) fx4_1 = nn.functional.avg_pool2d(x4_1, x4_1.shape[2:4]) fx4_1 = fx4_1.view(fx4_1.shape[0], -1) ax4_1 = self.classifier2(fx4_1) fres4_2 = nn.functional.avg_pool2d(res4_2, res4_2.shape[2:4]) fres4_2 = fres4_2.view(fres4_2.shape[0], -1) ares4_2 = self.classifier3(fres4_2) fres4_1 = nn.functional.avg_pool2d(res4_1, res4_1.shape[2:4]) fres4_1 = fres4_1.view(fres4_1.shape[0], -1) ares4_1 = self.classifier4(fres4_1) fx3_3 = nn.functional.avg_pool2d(x3_3, x3_3.shape[2:4]) fx3_3 = fx3_3.view(fx3_3.shape[0], -1) ax3_3 = self.classifier5(fx3_3) fx3_2 = nn.functional.avg_pool2d(x3_2, x3_2.shape[2:4]) fx3_2 = fx3_2.view(fx3_2.shape[0], -1) ax3_2 = self.classifier6(fx3_2) fx3_1 = nn.functional.avg_pool2d(x3_1, x3_1.shape[2:4]) fx3_1 = fx3_1.view(fx3_1.shape[0], -1) ax3_1 = self.classifier7(fx3_1) fres3_2 = nn.functional.avg_pool2d(res3_2, res3_2.shape[2:4]) fres3_2 = fres3_2.view(fres3_2.shape[0], -1) ares3_2 = self.classifier8(fres3_2) fres3_1 = nn.functional.avg_pool2d(res3_1, res3_1.shape[2:4]) fres3_1 = fres3_1.view(fres3_1.shape[0], -1) ares3_1 = self.classifier9(fres3_1) fx2_3 = nn.functional.avg_pool2d(x2_3, x2_3.shape[2:4]) fx2_3 = fx2_3.view(fx2_3.shape[0], -1) ax2_3 = 
self.classifier10(fx2_3) fx2_2 = nn.functional.avg_pool2d(x2_2, x2_2.shape[2:4]) fx2_2 = fx2_2.view(fx2_2.shape[0], -1) ax2_2 = self.classifier11(fx2_2) fx2_1 = nn.functional.avg_pool2d(x2_1, x2_1.shape[2:4]) fx2_1 = fx2_1.view(fx2_1.shape[0], -1) ax2_1 = self.classifier12(fx2_1) fres2_2 = nn.functional.avg_pool2d(res2_2, res2_2.shape[2:4]) fres2_2 = fres2_2.view(fres2_2.shape[0], -1) ares2_2 = self.classifier13(fres2_2) fres2_1 = nn.functional.avg_pool2d(res2_1, res2_1.shape[2:4]) fres2_1 = fres2_1.view(fres2_1.shape[0], -1) ares2_1 = self.classifier14(fres2_1) fx1_3 = nn.functional.avg_pool2d(x1_3, x1_3.shape[2:4]) fx1_3 = fx1_3.view(fx1_3.shape[0], -1) ax1_3 = self.classifier15(fx1_3) fx1_2 = nn.functional.avg_pool2d(x1_2, x1_2.shape[2:4]) fx1_2 = fx1_2.view(fx1_2.shape[0], -1) ax1_2 = self.classifier16(fx1_2) fx1_1 = nn.functional.avg_pool2d(x1_1, x1_1.shape[2:4]) fx1_1 = fx1_1.view(fx1_1.shape[0], -1) ax1_1 = self.classifier17(fx1_1) fres1_2 = nn.functional.avg_pool2d(res1_2, res1_2.shape[2:4]) fres1_2 = fres1_2.view(fres1_2.shape[0], -1) ares1_2 = self.classifier18(fres1_2) fres1_1 = nn.functional.avg_pool2d(res1_1, res1_1.shape[2:4]) fres1_1 = fres1_1.view(fres1_1.shape[0], -1) ares1_1 = self.classifier19(fres1_1) return cls_score, global_feat, ax4_2, ax4_1, ares4_2, ares4_1, ax3_3, ax3_2, ax3_1, ares3_2, ares3_1, ax2_3, ax2_2, ax2_1, ares2_2, ares2_1, ax1_3, ax1_2, ax1_1, ares1_2, ares1_1 elif 'Jointout' in self.model_name and self.training: x0, x1, x2, x3, x4, res0, res1, res2, res3, res4 = self.base(x, camid) global_feat = nn.functional.avg_pool2d(x4, x4.shape[2:4]) global_feat = global_feat.view(global_feat.shape[0], -1) feat = self.bottleneck(global_feat) cls_score = self.classifier4(feat) res4 = nn.functional.avg_pool2d(res4, res4.shape[2:4]) res4 = res4.view(res4.shape[0], -1) res4 = self.classifier4_1(res4) x3 = nn.functional.avg_pool2d(x3, x3.shape[2:4]) x3 = x3.view(x3.shape[0], -1) x3 = self.classifier3_1(x3) res3 = nn.functional.avg_pool2d(res3, 
res3.shape[2:4]) res3 = res3.view(res3.shape[0], -1) res3 = self.classifier3(res3) x2 = nn.functional.avg_pool2d(x2, x2.shape[2:4]) x2 = x2.view(x2.shape[0], -1) x2 = self.classifier2(x2) res2 = nn.functional.avg_pool2d(res2, res2.shape[2:4]) res2 = res2.view(res2.shape[0], -1) res2 = self.classifier2_1(res2) x1 = nn.functional.avg_pool2d(x1, x1.shape[2:4]) x1 = x1.view(x1.shape[0], -1) x1 = self.classifier1(x1) res1 = nn.functional.avg_pool2d(res1, res1.shape[2:4]) res1 = res1.view(res1.shape[0], -1) res1 = self.classifier1_1(res1) x0 = nn.functional.avg_pool2d(x0, x0.shape[2:4]) x0 = x0.view(x0.shape[0], -1) x0 = self.classifier0(x0) res0 = nn.functional.avg_pool2d(res0, res0.shape[2:4]) res0 = res0.view(res0.shape[0], -1) res0 = self.classifier0_1(res0) return global_feat, x0, x1, x2, x3, cls_score, res0, res1, res2, res3, res4 x = self.base(x, camid) # print(x.shape) global_feat = nn.functional.avg_pool2d(x, x.shape[2:4]) # print(global_feat.shape) # print(x.shape) # for convert to onnx, kernel size must be from x.shape[2:4] to a constant [20,20] #global_feat = nn.functional.avg_pool2d(x, [16, 16]) # flatten to (bs, 2048), global_feat.shape[0] global_feat = global_feat.view(global_feat.shape[0], -1) feat = self.bottleneck(global_feat) if self.neck == 'no': feat = global_feat elif self.neck == 'bnneck': feat = self.bottleneck(global_feat) if self.training: if self.cos_layer: cls_score = self.arcface(feat, label) else: cls_score = self.classifier(feat) return cls_score, global_feat # global feature for triplet loss else: if self.neck_feat == 'after': # print("Test with feature after BN") return feat else: # print("Test with feature before BN") return global_feat def load_param(self, trained_path): param_dict = torch.load(trained_path) for i in param_dict: if 'classifier' in i or 'arcface' in i: continue self.state_dict()[i].copy_(param_dict[i]) print('Loading pretrained model from {}'.format(trained_path)) def load_param_finetune(self, model_path): param_dict = 
torch.load(model_path) # for i in param_dict: # print(i)#change by sb # self.state_dict()[i].copy_(param_dict[i]) print('Loading pretrained model for finetuning from {}'.format(model_path)) def make_model(cfg, num_class): model = Backbone(num_class, cfg) return model
/resnet/__init__.py
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
/resnet/config.py
resnet50_path = './resnet/resnet50-19c8e357.pth'
/resnet/make_model.py
from .resnet import ResNet, BasicBlock, Bottleneck import torch from torch import nn from .config import resnet50_path model_urls = { 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', } class ResNet50(nn.Module): def __init__(self): super(ResNet50, self).__init__() net = ResNet(last_stride=2, block=Bottleneck, frozen_stages=False, layers=[3, 4, 6, 3]) net.load_param(resnet50_path) self.layer0 = net.layer0 self.layer1 = net.layer1 self.layer2 = net.layer2 self.layer3 = net.layer3 self.layer4 = net.layer4 def forward(self, x): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) return layer4 def load_param(self, trained_path): param_dict = torch.load(trained_path) for i in param_dict: if 'classifier' in i or 'arcface' in i: continue self.state_dict()[i].copy_(param_dict[i]) print('Loading pretrained model from {}'.format(trained_path)) class ResNet50_BIN(nn.Module): def __init__(self): super(ResNet50_BIN, self).__init__() net = ResNet(last_stride=2, block=IN_Bottleneck, frozen_stages=False, layers=[3, 4, 6, 3]) net.load_param(resnet50_path) self.layer0 = net.layer0 self.layer1 = net.layer1 self.layer2 = net.layer2 self.layer3 = net.layer3 self.layer4 = net.layer4 def forward(self, x): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) return layer4 def load_param(self, trained_path): param_dict = torch.load(trained_path) for i in param_dict: if 'classifier' in i or 'arcface' in i: continue self.state_dict()[i].copy_(param_dict[i]) print('Loading pretrained model from 
{}'.format(trained_path)) class ResNet50_LowIN(nn.Module): def __init__(self): super(ResNet50_LowIN, self).__init__() net = ResNet_LowIN(last_stride=2, block=Bottleneck, frozen_stages=False, layers=[3, 4, 6, 3]) net.load_param(resnet50_path) self.layer0 = net.layer0 self.layer1 = net.layer1 self.layer2 = net.layer2 self.layer3 = net.layer3 self.layer4 = net.layer4 def forward(self, x): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) return layer4 def load_param(self, trained_path): param_dict = torch.load(trained_path) for i in param_dict: if 'classifier' in i or 'arcface' in i: continue self.state_dict()[i].copy_(param_dict[i]) print('Loading pretrained model from {}'.format(trained_path))
/resnext/__init__.py
from .resnext101 import ResNeXt101
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
riadghorra/whiteboard-oop-project
refs/heads/master
{"/src/tools.py": ["/src/figures.py"], "/src/white_board.py": ["/src/figures.py", "/src/tools.py"], "/src/client.py": ["/src/white_board.py"], "/src/main.py": ["/src/white_board.py"]}
└── └── src ├── client.py ├── figures.py ├── main.py ├── serveur.py ├── tools.py └── white_board.py
/src/client.py
import socket import json import sys import math from white_board import WhiteBoard, binary_to_dict ''' Ouverture de la configuration initiale stockée dans config.json qui contient le mode d'écriture, la couleur et la taille d'écriture. Ces Paramètres sont ensuite à modifier par l'utisateur dans l'interface pygame ''' with open('config.json') as json_file: start_config = json.load(json_file) ''' définition de l'adresse IP du serveur. Ici le serveur est en local. ''' hote = start_config["ip_serveur"] port = 5001 def main(): """ Création d'un socket pour communiquer via un protocole TCP/IP """ connexion_avec_serveur = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Connexion au serveur try: connexion_avec_serveur.connect((hote, port)) except (TimeoutError, ConnectionRefusedError, ConnectionResetError, ConnectionAbortedError) as e: return print("Le serveur n'a pas répondu, vérifiez les paramètres de connexion") print("Connexion réussie avec le serveur") # First get the client id username = binary_to_dict(connexion_avec_serveur.recv(2 ** 16))["client_id"] # Second get the message size msg_recu = connexion_avec_serveur.recv(2 ** 8) message_size = binary_to_dict(msg_recu)["message_size"] # Then get the first chunk of history using the number of byte equal to the power of 2 just above its size msg_recu = connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1)) total_size_received = sys.getsizeof(msg_recu) # One we get the first chunk, we loop until we get the whole history while total_size_received < message_size: msg_recu += connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1)) total_size_received = sys.getsizeof(msg_recu) msg_decode = binary_to_dict(msg_recu) hist = msg_decode # Après réception de l'état du whiteboard, c'est à dire des figures et textboxes déjà dessinées par des utilisateurs # précédents, le programme lance un whiteboard whiteboard = WhiteBoard(username, start_config, hist) whiteboard.start(connexion_avec_serveur) if 
__name__ == '__main__': main()
/src/figures.py
""" Module contenant toutes les figures et opérations de base """ import pygame import pygame.draw from datetime import datetime def distance(v1, v2): """ Calcule la distance euclidienne entre deux vecteurs """ try: return ((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2) ** 0.5 except TypeError: return "Ce ne sont pas des vecteurs" class Figure: def __init__(self): pass def draw(self): pass def fetch_params(self): pass class Point(Figure): """ Classe d'un point prêt à être tracé sur le tableau coord (list) : coordonées point_color (list) : couleur en RGB font_size (int) : epaisseur en pixels toolbar_size (int) : epaisseur de la toolbar en haut du tableau sur laquelle on ne veut pas que le point depasse """ def __init__(self, coord, point_color, font_size, toolbar_size=0): Figure.__init__(self) self.point_color = point_color self.font_size = font_size # used to not write on the toolbar if the font size is big self.coord = [coord[0], max(coord[1], toolbar_size + font_size + 1)] self.type = "Point" def draw(self, screen): """ Dessine le point sur l'ecran """ pygame.draw.circle(screen, self.point_color, self.coord, self.font_size) pygame.display.flip() return def fetch_params(self): """ Retourne un dictionnaire des parametres """ return {"coord": self.coord, "point_color": self.point_color, "font_size": self.font_size} class Line(Figure): """ Classe d'une ligne droite line_color (list) : couleur de la ligne en RGB start_pos (list): coordonee du debut de la ligne droite end_pos (list) : coordonee de la fin de la ligne droite font_size (int): epaisseur """ def __init__(self, line_color, start_pos, end_pos, font_size): Figure.__init__(self) self.line_color = line_color self.start_pos = start_pos self.end_pos = end_pos self.font_size = font_size self.type = "Line" def draw(self, screen): """ Dessine la ligne sur l'ecran """ pygame.draw.line(screen, self.line_color, self.start_pos, self.end_pos, self.font_size) return def fetch_params(self): """ Retourne un dictionnaire des 
parametres """ return {"line_color": self.line_color, "start_pos": self.start_pos, "end_pos": self.end_pos, "font_size": self.font_size} class Rectangle(Figure): """ Classe d un rectangle color (list) : couleur du rectangle left, right (int) : coordonees d'absice a gauche, droite du rectangle bottom, top (int) : coordonees d'ordonnee en haut et en bas du rectangle """ def __init__(self, c1, c2, color): """ On definit les parametres du rectangle a partir des coordonees de deux coins c1, c2 (lists): coordonees de deux coins du rectangle """ Figure.__init__(self) self.c1 = c1 self.c2 = c2 self.color = color # on recupere left avec le min des abscisses et on fait pareil pour right top et bottom self.left = min(c1[0], c2[0]) self.top = min(c1[1], c2[1]) self.right = max(c1[0], c2[0]) self.bottom = max(c1[1], c2[1]) self.width = self.right - self.left self.length = self.bottom - self.top self.rect = pygame.Rect(self.left, self.top, self.width, self.length) self.type = "rect" def draw(self, screen): """ Dessine le rectangle sur l'ecran """ pygame.draw.rect(screen, self.color, self.rect, 0) def fetch_params(self): """ Retourne un dictionnaire des parametres """ return {"c1": self.c1, "c2": self.c2, "color": self.color} class Circle(Figure): """ Classe d un cercle center (list) : les coordonees du centre extremity (list) : les coordonees d'une extremite color (list) : couleur toolbar_size (int) : la taille de la toolbar en pixel pour ne pas dessiner dessus radius (int) : rayon """ def __init__(self, center, extremity, color, toolbar_size=0): Figure.__init__(self) self.center = center # on ne veut pas depasser sur la toolbar donc on reduit le rayon self.radius = min(int(distance(center, extremity)), center[1] - toolbar_size - 1) self.extremity = [center[0] + self.radius, center[1]] self.color = color self.type = "circle" def draw(self, screen): """ dessine le cercle sur l ecran """ pygame.draw.circle(screen, self.color, self.center, self.radius) def fetch_params(self): """ 
Retourne un dictionnaire des parametres """ return {"center": self.center, "extremity": self.extremity, "color": self.color} class TextBox(Figure): """ Classe d une textbox x, y (int) : l'abscisse a gauche et l'ordonee a droite de la textbox ie (x,y) est le topleft w (int) : longueur de la textbox h (int) : hauteur de la textbox box_color (list) : couleur du contour de la box font (string) : police du texte font_size (int) : taille des caracteres text (string) : texte de la texbox text_color (list) : couleur du texte """ def __init__(self, x, y, w, h, box_color, font, font_size, text, text_color): Figure.__init__(self) self.__rect = pygame.Rect(x, y, w, h) self._color = box_color self._text = text self._font = font self._font_size = font_size self._sysfont = pygame.font.SysFont(font, font_size) self._text_color = text_color self._txt_surface = self._sysfont.render(text, True, self._text_color) self.id_counter = str(x) + "_" + str(y) self.type = "Text_box" """ Encapsulation """ def fetch_params(self): """ Retourne un dictionnaire des parametres """ return {"x": self.__rect.x, "y": self.__rect.y, "w": self.__rect.w, "h": self.__rect.h, "box_color": self._color, "font": self._font, "font_size": self._font_size, "text": self._text, "text_color": self._text_color} def get_textbox_color(self): return self._color def set_textbox_color(self, new_color): self._color = new_color def get_textbox_text(self): return self._text def add_character_to_text(self, char, whiteboard): """ rajoute un caractere au texte """ id_counter = whiteboard.active_box.id_counter for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']: if action['id'] == id_counter: if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name: self._text += char action['params']["text"] = whiteboard.active_box.get_textbox_text() action['params']["w"] = whiteboard.active_box.update() now = datetime.now() timestamp = datetime.timestamp(now) 
action['timestamp'] = timestamp action['client'] = whiteboard.name action_to_update_textbox = action for textbox in whiteboard.get_text_boxes(): if textbox.id_counter == id_counter: if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name: whiteboard.del_text_box(textbox) try: whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"])) except UnboundLocalError: print('Something unexpected happened. A textbox update may have failed') def delete_char_from_text(self, whiteboard): """ efface le dernier caractere du texte """ id_counter = whiteboard.active_box.id_counter for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']: if action['id'] == id_counter: if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name: self._text = self._text[:-1] action['params']["text"] = whiteboard.active_box.get_textbox_text() now = datetime.now() timestamp = datetime.timestamp(now) action['timestamp'] = timestamp action['client'] = whiteboard.name action_to_update_textbox = action for textbox in whiteboard.get_text_boxes(): if textbox.id_counter == id_counter: if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name: whiteboard.del_text_box(textbox) try: whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"])) except UnboundLocalError: print('Something unexpected happened. 
A textbox update may have failed') def render_font(self, text, color, antialias=True): """ effectue le rendu du texte """ return self._sysfont.render(text, antialias, color) def set_txt_surface(self, value): self._txt_surface = value @property def rect(self): return self.__rect def update(self): """ Change la taille du rectangle de contour si le texte est trop long """ width = max(140, self._txt_surface.get_width() + 20) self.__rect.w = width return width def draw(self, screen): """ dessine la textbox """ # Blit le texte screen.blit(self._txt_surface, (self.__rect.x + 5, self.__rect.y + 5)) # Blit le rectangle pygame.draw.rect(screen, self._color, self.__rect, 2) # ============================================================================= # fonction de dessins instantanees # ============================================================================= def draw_point(params, screen): """ dessine un point sur l'ecran avec les parametres d entree params (dict) : dictionnaires des parametres screen (pygame screen) : ecran sur lequel dessiner """ try: return Point(**params).draw(screen) except TypeError: return "Parametres incorrect" def draw_line(params, screen): """ dessine une ligne sur l'ecran avec les parametres d entree params (dict) : dictionnaires des parametres screen (pygame screen) : ecran sur lequel dessiner """ try: return Line(**params).draw(screen) except TypeError: return "Parametres incorrect" def draw_textbox(params, screen): """ dessine une textbox sur l'ecran avec les parametres d entree params (dict) : dictionnaires des parametres screen (pygame screen) : ecran sur lequel dessiner """ try: return TextBox(**params).draw(screen) except TypeError: return "Parametres incorrect" def draw_rect(params, screen): """ dessine un rectangle sur l'ecran avec les parametres d entree params (dict) : dictionnaires des parametres screen (pygame screen) : ecran sur lequel dessiner """ try: return Rectangle(**params).draw(screen) except TypeError: return 
"Parametres incorrect" def draw_circle(params, screen): """ dessine un cercle sur l'ecran avec les parametres d entree params (dict) : dictionnaires des parametres screen (pygame screen) : ecran sur lequel dessiner """ try: return Circle(**params).draw(screen) except TypeError: return "Parametres incorrect"
/src/main.py
from white_board import WhiteBoard import json ''' This file is used to run locally or to debug ''' with open('config.json') as json_file: start_config = json.load(json_file) def main(): board = WhiteBoard("client", start_config) board.start_local() if __name__ == '__main__': main()
/src/serveur.py
import socket
import sys
import time
from threading import Thread
import json

'''
Les deux fonctions suivantes permettent de convertir les dictionnaires en binaire
et réciproquement. L'appel de ces deux fonctions permet d'échanger des
dictionnaires par socket.
'''


def dict_to_binary(dico):
    """Serialize *dico* to UTF-8 encoded JSON bytes.

    Returns None (after printing a warning) when the dictionary contains
    values that are not JSON-serializable — callers treat this as best-effort.
    """
    try:
        # FIX: renamed the local variable; the original shadowed the builtin `str`.
        payload = json.dumps(dico)
        return bytes(payload, 'utf-8')
    except TypeError:
        print("Le dictionnaire n'est pas du format attendu")


def binary_to_dict(binary):
    """Decode UTF-8 JSON bytes back into a dictionary.

    On any decoding/parsing failure, print a diagnostic and return an empty
    history structure so the caller's loop keeps running.
    """
    try:
        jsn = ''.join(binary.decode("utf-8"))
        d = json.loads(jsn)
    except (TypeError, json.decoder.JSONDecodeError) as e:
        # BUGFIX: the original wrote `if e == TypeError`, comparing the
        # exception *instance* to the class — always False, so the wrong
        # message was printed. isinstance() is the correct test.
        if isinstance(e, TypeError):
            print("Le message reçu n'est pas du format attendu")
        else:
            print("Un paquet a été perdu")
        return {"actions": [], "message": [], "auth": []}
    return d


class Client(Thread):
    """A client connected to the whiteboard.

    Inherits Thread so several clients can use the whiteboard in parallel.
    Each client has a name, a flag telling whether it is done using the
    whiteboard, and (through ``self.server``) the shared history of every
    operation performed on the whiteboard. That history is what the client
    exchanges with the server.
    """

    # Class-level counter used to build a unique id for each new client.
    client_id = 1

    def __init__(self, server_, client_socket=None):
        Thread.__init__(self)
        self._client_socket = client_socket
        self._done = False
        self._last_timestamp_sent = 0
        self.server = server_
        # Build "ClientN" from the class counter, then bump it for the next one.
        self.client_id = "Client" + str(Client.client_id)
        Client.client_id += 1

    # --- Encapsulation: kept as property() pairs to preserve the public API ---

    def __get_client_socket(self):
        return self._client_socket

    def __set_client_socket(self, c):
        self._client_socket = c

    client_socket = property(__get_client_socket, __set_client_socket)

    def __get_last_timestamp_sent(self):
        return self._last_timestamp_sent

    def __set_last_timestamp_sent(self, c):
        self._last_timestamp_sent = c

    last_timestamp_sent = property(__get_last_timestamp_sent, __set_last_timestamp_sent)

    def is_done(self):
        # True once the client has finished using the whiteboard.
        return self._done

    def end(self):
        self._done = True

    def check_match(self, action):
        """Return True when *action* updates an already-known text box.

        Scans the shared history for a "Text_box" entry with the same id; if
        found, refreshes its timestamp/params/client in place. This is how we
        tell "a text box was edited" apart from "a new text box was created".
        """
        for textbox in [x for x in self.server.historique["actions"] if x["type"] == "Text_box"]:
            if action["id"] == textbox["id"]:
                textbox["timestamp"] = action["timestamp"]
                textbox["params"] = action["params"]
                textbox["client"] = action["client"]
                return True
        return False

    def disconnect_client(self):
        """Terminate the connection between the server and this client."""
        self.end()
        print("Déconnexion d'un client")
        self.server.historique["message"] = "end"

    def run(self):
        """Receive/merge client updates and push back the actions they miss.

        The central while loop keeps receiving history dictionaries from the
        client. Every new action is merged into the shared history (text-box
        updates are folded into the existing entry via check_match). Actions
        newer than ``last_timestamp_sent`` and authored by *other* clients are
        then sent back.
        """
        try:
            while not self.is_done():
                msg_recu = self.client_socket.recv(2 ** 24)
                new_actions = binary_to_dict(msg_recu)
                # Two cases per action: an update of an existing text box is
                # folded in place, anything else is appended to the history.
                for action in new_actions["actions"]:
                    matched = False
                    if action["type"] == "Text_box":
                        matched = self.check_match(action)
                    if not matched:
                        self.server.historique["actions"].append(action)
                # NOTE(review): disconnect_client() writes "end" (lower case)
                # while this test expects "END" — presumably "END" is set by the
                # client side on disconnect; confirm against the client protocol.
                if self.server.historique["message"] == "END":
                    self.disconnect_client()
                if new_actions["auth"]:
                    # auth payload is [client_name, granted?]: True appends,
                    # False revokes the previously granted authorization.
                    if new_actions["auth"][1]:
                        self.server.historique["auth"].append(new_actions["auth"][0])
                    else:
                        self.server.historique["auth"].remove(new_actions["auth"][0])
                time.sleep(0.01)
                actions_to_send = [x for x in self.server.historique["actions"]
                                   if (x["timestamp"] > self.last_timestamp_sent
                                       and x["client"] != self.client_id)]
                to_send = {"message": "", 'actions': actions_to_send,
                           'auth': self.server.historique["auth"]}
                self.client_socket.send(dict_to_binary(to_send))
                # Update the high-water mark only when something was sent.
                if actions_to_send:
                    self.last_timestamp_sent = max(x["timestamp"] for x in actions_to_send)
        except (ConnectionAbortedError, ConnectionResetError):
            # Handles a client dropping the connection abruptly.
            print("Un client s'est déconnecté")


class Server:
    """The whiteboard server.

    Holds the host/port used to accept connections, the listening socket, the
    list of clients waiting to be started, the list of running client threads,
    and the shared ``historique`` dictionary of every operation performed on
    the whiteboard, which is exchanged with the clients.
    """

    def __init__(self, port, host='', historique=None):
        self._host = host
        self._port = port
        self.__connexion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__clients = []
        self.__threadlaunched = []
        # Avoid a shared mutable default: build a fresh history per server.
        if historique is None:
            self.historique = {"message": "", 'actions': [], 'auth': []}
        else:
            self.historique = historique

    # --- Encapsulation accessors ---

    @property
    def host(self):
        return self._host

    @property
    def port(self):
        return self._port

    @property
    def clients(self):
        return self.__clients

    def add_client(self, new_client):
        self.__clients.append(new_client)

    def remove_client(self, client_removed):
        self.__clients.remove(client_removed)

    @property
    def threadlaunched(self):
        return self.__threadlaunched

    def add_thread(self, new_thread):
        self.__threadlaunched.append(new_thread)

    def remove_thread(self, thread_removed):
        self.__threadlaunched.remove(thread_removed)

    def scan_new_client(self):
        """Accept an incoming client and hand it its id plus the full history."""
        client, infos_connexion = self.__connexion.accept()
        # Initialize a new client thread and send its generated id.
        new_thread = Client(self)
        client_id = new_thread.client_id
        client.send(dict_to_binary({"client_id": client_id}))
        to_send = dict_to_binary(self.historique)
        # Announce the payload size first, since the history can be large.
        message_size = sys.getsizeof(to_send)
        client.send(dict_to_binary({"message_size": message_size}))
        # Wait a little so the previous message does not overlap the next one.
        # !!WARNING!! Depending on the computer this sleep time may be too
        # small; if the whiteboard crashes, please increase it.
        time.sleep(0.5)
        client.send(to_send)
        # Record the last timestamp already known by this client
        # (ValueError: max() on an empty history).
        try:
            new_thread.last_timestamp_sent = max(x["timestamp"] for x in self.historique["actions"])
        except ValueError:
            new_thread.last_timestamp_sent = 0
        new_thread.client_socket = client
        self.add_client(new_thread)
        print("Un client s'est connecté. Bienvenue {} !".format(client_id))

    def run(self):
        """Listen forever for new clients; start their threads and reap finished ones."""
        self.__connexion.bind((self.host, self.port))
        # The server accepts at most 100 queued connections.
        self.__connexion.listen(100)
        print("Le serveur est prêt sur le port numéro {}".format(self.port))
        while True:
            self.scan_new_client()
            # FIX: iterate over snapshots — the original mutated these lists
            # while iterating them, which skips elements.
            for client in list(self.clients):
                client.start()
                self.remove_client(client)
                self.add_thread(client)
            for thread in list(self.threadlaunched):
                if thread.is_done():
                    thread.join()
                    self.remove_thread(thread)


if __name__ == '__main__':
    server = Server(5001, '')
    server.run()
/src/tools.py
""" Module contenant les differents outils de gestion du tableau """ import pygame import pygame.draw from datetime import datetime from figures import Point, Line, TextBox, Rectangle, Circle import time # ============================================================================= # classes de gestion des changements de parametres utilisateur # ============================================================================= class TriggerBox: """ Classe mere abstraite qui represente une zone carree de l'ecran sur laquelle on peut cliquer top_left (list) : coordonees du pixel en haut a gauche size (int) : taille en pixel du cote du carre """ def __init__(self, top_left, size): self.rect = pygame.Rect(top_left, size) self.coords = top_left def is_triggered(self, event): """ retourne le booleen : l'utilisateur clique sur la triggerbox event (pygame event) : clic de souris d un utilisateur """ return self.rect.collidepoint(event.pos) class Auth(TriggerBox): """ Classe d'un bouton qui change l'autorisation de modification """ def __init__(self, top_left, size): TriggerBox.__init__(self, top_left, size) self._size = size def add(self, screen): """ Dessine la authbox """ pygame.draw.rect(screen, [0, 0, 0], self.rect, 1) pygame.draw.circle(screen, [255, 0, 0], [int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)], int(min(self._size[0], self._size[1] / 3))) font = pygame.font.Font(None, 18) legend = {"text": font.render("auth", True, [0, 0, 0]), "coords": self.coords} screen.blit(legend["text"], legend["coords"]) def switch(self, screen, erasing_auth, modification_allowed, name): if erasing_auth: pygame.draw.circle(screen, [0, 255, 0], [int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)], int(min(self._size[0], self._size[1] / 3))) print("{} a donné son autorisation de modifications".format(name)) else: pygame.draw.circle(screen, [255, 0, 0], [int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + 
self._size[1] / 2)], int(min(self._size[0], self._size[1] / 3))) print("{} a retiré son autorisation de modifications".format(name)) return [name, erasing_auth] class Save(TriggerBox): """ Classe d'un bouton qui permet la sauvegarde du whiteboard en format PNG """ def __init__(self, top_left, size): TriggerBox.__init__(self, top_left, size) self._size = size def add(self, screen): """ Dessine la savebox """ pygame.draw.rect(screen, [0, 0, 0], self.rect, 1) font = pygame.font.Font(None, 18) legend = {"text": font.render("save", True, [0, 0, 0]), "coords": self.coords} screen.blit(legend["text"], legend["coords"]) def save(self, screen, whiteboard): pygame.image.save(screen.subsurface((0, whiteboard.get_config(["toolbar_y"]) + 1, whiteboard.get_config(["width"]), whiteboard.get_config(["length"]) - whiteboard.get_config( ["toolbar_y"]) - 1)), "mygreatdrawing.png") class Mode(TriggerBox): """ Classe d'un mode de dessin du tableau dans lequel on peut rentrer via la triggerbox dont il herite name (string) : nom du mode qui sera inscrit dans sa triggerbox sur l'ecran """ def __init__(self, name, top_left, size): super(Mode, self).__init__(top_left, size) self.name = name def add(self, screen): """ Dessine la triggerbox du mode et la rend active sur l'ecran """ pygame.draw.rect(screen, [0, 0, 0], self.rect, 1) font = pygame.font.Font(None, 18) legend = {"text": font.render(self.name, True, [0, 0, 0]), "coords": self.coords} screen.blit(legend["text"], legend["coords"]) class ColorBox(TriggerBox): """ Classe d'une triggerbox de choix de couleur sur l'ecran color (list) : color of the box """ def __init__(self, color, top_left, size): super(ColorBox, self).__init__(top_left, size) self.color = color def add(self, screen): """ Dessine la colorbox """ pygame.draw.rect(screen, self.color, self.rect) class FontSizeBox(TriggerBox): """ Classe des triggerbox de choix de l'epaisseur du trait font_size (int) : epaisseur du trait en pixel """ def __init__(self, font_size, top_left, 
size): super(FontSizeBox, self).__init__(top_left, size) self.font_size = font_size self.center = [top_left[0] + size[0] // 2, top_left[1] + size[1] // 2] # pour dessiner un cercle representant l epaisseur de selection def add(self, screen): """ Dessine la fontsizebox """ pygame.draw.rect(screen, [0, 0, 0], self.rect, 1) pygame.draw.circle(screen, [0, 0, 0], self.center, self.font_size) # ============================================================================= # classes de gestion des evenements utilisateur # ============================================================================= class EventHandler: """ Classe mere des gestionnaires d'evenements utilisateur en fontcion des modes whiteboard : classe whiteboard sur laquelle notre handler va gerer les evenements utilisateur """ def __init__(self, whiteboard): self.whiteboard = whiteboard def handle(self, event): """ Ce test commun a tous les modes verifie si l'utilisateur quitte ou change de mode """ out = False if event.type == pygame.QUIT: self.whiteboard.end() self.whiteboard.switch_config("quit") out = True if event.type == pygame.MOUSEBUTTONDOWN: coord = event.dict['pos'] if coord[1] <= self.whiteboard.get_config(["toolbar_y"]): self.whiteboard.switch_config(event) out = True return out class HandlePoint(EventHandler): """ Classe du gestionnaire d'evenement en mode point """ def __init__(self, whiteboard): EventHandler.__init__(self, whiteboard) def handle_all(self, event): """ En mode point on s'interesse aux clics gauches de souris et on dessine un point """ handled = self.handle(event) # commun a tous les handler qui verifie si on change de mode ou on quitte if handled: return if event.type == pygame.MOUSEBUTTONDOWN: if event.dict["button"] != 1: return coord = event.dict["pos"] to_draw = Point(coord, self.whiteboard.get_config(["active_color"]), self.whiteboard.get_config(["font_size"]), self.whiteboard.get_config(["toolbar_y"])) now = datetime.now() timestamp = datetime.timestamp(now) 
self.whiteboard.draw(to_draw, timestamp) class HandleLine(EventHandler): """ Classe du gestionnaire d'evenement en mode ligne """ def __init__(self, whiteboard): EventHandler.__init__(self, whiteboard) def handle_mouse_motion(self): """ Gere les mouvements de souris : l'utilisateur a le clic enfonce le rendu du trait est en direct """ if self.whiteboard.is_drawing(): self.whiteboard.mouse_position = pygame.mouse.get_pos() if self.whiteboard.mouse_position[1] <= self.whiteboard.get_config(["toolbar_y"]): self.whiteboard.pen_up() elif self.whiteboard.last_pos is not None: to_draw = Line(self.whiteboard.get_config(["active_color"]), self.whiteboard.last_pos, self.whiteboard.mouse_position, self.whiteboard.get_config(["font_size"])) now = datetime.now() timestamp = datetime.timestamp(now) self.whiteboard.draw(to_draw, timestamp) self.whiteboard.update_last_pos() def handle_mouse_button_up(self): """ Gere la levee du doigt sur le clic : on effectue un pen up """ self.whiteboard.mouse_position = (0, 0) self.whiteboard.pen_up() self.whiteboard.reset_last_pos() def handle_mouse_button_down(self): """ Gere le clic de l'utilisateur : pen down """ self.whiteboard.pen_down() def handle_all(self, event): """ Gere tous les evenements avec la methode associe via un arbre de if """ handled = self.handle(event) if handled: return elif event.type == pygame.MOUSEMOTION: self.handle_mouse_motion() elif event.type == pygame.MOUSEBUTTONUP: self.handle_mouse_button_up() elif event.type == pygame.MOUSEBUTTONDOWN: self.handle_mouse_button_down() pygame.display.flip() class HandleText(EventHandler): """ Classe du gestionnaire d'evenement en mode textbox """ def __init__(self, whiteboard): EventHandler.__init__(self, whiteboard) def box_selection(self, event): """ Gere les clics utilisateur S'il s'agit d'un clic droit, on cree une nouvelle box S'il s'agit d'un clic gauche on regarde si cela selectionne une zone d une ancienne box qui deviendra la box active """ if event.dict["button"] == 3: 
coord = event.dict['pos'] text_box = TextBox(*coord, self.whiteboard.get_config(["text_box", "textbox_width"]), self.whiteboard.get_config(["text_box", "textbox_length"]), self.whiteboard.get_config(["text_box", "active_color"]), self.whiteboard.get_config(["text_box", "font"]), self.whiteboard.get_config(["text_box", "font_size"]), "", self.whiteboard.get_config(["active_color"])) self.whiteboard.append_text_box(text_box) now = datetime.now() timestamp = datetime.timestamp(now) self.whiteboard.draw(text_box, timestamp) self.whiteboard.set_active_box(text_box) elif event.dict["button"] == 1: for box in self.whiteboard.get_text_boxes(): if box.rect.collidepoint(event.pos): self.whiteboard.set_active_box(box, new=False) def write_in_box(self, event): """ Gere les entrees clavier de l'utilisateur Si une box est selectionnee cela modifie le texte en consequence """ if self.whiteboard.active_box is not None: # on efface un caractere if event.key == pygame.K_BACKSPACE: self.whiteboard.active_box.delete_char_from_text(self.whiteboard) # pour modifier la box il est malheureusement necessaire de re-render tout le tableau self.whiteboard.clear_screen() self.whiteboard.load_actions(self.whiteboard.get_hist()) elif event.key == pygame.K_TAB or event.key == pygame.K_RETURN: pass else: self.whiteboard.active_box.add_character_to_text(event.unicode, self.whiteboard) # on re-render tout aussi ici pour éviter de superposer des écritures self.whiteboard.clear_screen() self.whiteboard.load_actions(self.whiteboard.get_hist()) if self.whiteboard.active_box is not None: # Re-render the text. 
self.whiteboard.active_box.set_txt_surface(self.whiteboard.active_box.render_font( self.whiteboard.active_box.get_textbox_text(), self.whiteboard.active_box.get_textbox_color())) def handle_all(self, event): """ Gere tous les evenements avec la methode associée via un arbre de if """ handled = self.handle(event) if handled: return if event.type == pygame.MOUSEBUTTONDOWN: self.box_selection(event) if event.type == pygame.KEYDOWN: self.write_in_box(event) pygame.display.flip() class HandleRect(EventHandler): """ Classe du gestionnaire d'evenement en mode rectangle Nous avons decidé de faire un systeme de clic drag pour tracer un rectangle """ def __init__(self, whiteboard): EventHandler.__init__(self, whiteboard) self.c1 = None def handle_mouse_button_up(self, coord): """ Recupere la deuxieme coordonee d'un coin du rectangle a tracer quand l'utilisateur arrete de cliquer """ if self.c1 is not None: coord = list(coord) # on ne veut pas depasser sur la toolbar coord[1] = max(self.whiteboard.get_config(["toolbar_y"]), coord[1]) to_draw = Rectangle(self.c1, coord, self.whiteboard.get_config(["active_color"])) now = datetime.now() timestamp = datetime.timestamp(now) self.whiteboard.draw(to_draw, timestamp) self.c1 = None def handle_mouse_button_down(self, event): """ Recupere une coordonee d'un coin du rectangle a tracer quand l'utilisateur démarre un clic """ if event.dict["button"] != 1: return self.c1 = event.dict['pos'] def handle_all(self, event): """ Gere tous les evenements avec la methode associe via un arbre de if """ handled = self.handle(event) if handled: return elif event.type == pygame.MOUSEBUTTONUP: self.handle_mouse_button_up(coord=event.dict['pos']) elif event.type == pygame.MOUSEBUTTONDOWN: self.handle_mouse_button_down(event) pygame.display.flip() class HandleCircle(EventHandler): """ Classe du gestionnaire d'evenement en mode Cercle Nous avons decidé de faire un systeme de clic drag la-encore pour tracer un cercle """ def __init__(self, whiteboard): 
EventHandler.__init__(self, whiteboard) self.center = None def handle_mouse_button_up(self, coord): """ Recupere la coordonee d'un point sur le cercle quand l'utilisateur arrete de cliquer """ if self.center is not None: coord = list(coord) to_draw = Circle(self.center, coord, self.whiteboard.get_config(["active_color"]), self.whiteboard.get_config(["toolbar_y"])) now = datetime.now() timestamp = datetime.timestamp(now) self.whiteboard.draw(to_draw, timestamp) self.center = None def handle_mouse_button_down(self, event): """ Recupere la coordonnee du centre du cercle quand l'utilisateur demarre un clic """ if event.dict["button"] != 1: return self.center = event.dict['pos'] def handle_all(self, event): """ Gere tous les evenements avec la methode associe via un arbre de if """ handled = self.handle(event) if handled: return elif event.type == pygame.MOUSEBUTTONUP: self.handle_mouse_button_up(coord=event.dict['pos']) elif event.type == pygame.MOUSEBUTTONDOWN: self.handle_mouse_button_down(event) pygame.display.flip()
/src/white_board.py
import pygame
import pygame.draw
import json
import sys
from functools import reduce
import operator
from figures import TextBox, draw_line, draw_point, draw_textbox, draw_rect, draw_circle
from tools import Mode, ColorBox, Auth, Save, FontSizeBox, HandlePoint, HandleLine, HandleText, HandleRect, HandleCircle
import copy

# Wire format helpers: the client/server protocol exchanges UTF-8 encoded
# JSON dictionaries over a raw socket.


def dict_to_binary(the_dict):
    """Serialize a dict to UTF-8 encoded JSON bytes (wire format).

    :param the_dict: JSON-serializable dictionary
    :return: bytes ready to be sent on the socket
    """
    # Renamed local: the original shadowed the builtin `str`.
    json_str = json.dumps(the_dict)
    return bytes(json_str, 'utf-8')


def binary_to_dict(binary):
    """Decode UTF-8 JSON bytes back into a dict.

    Returns an empty history dict on malformed/truncated payloads so the
    caller can keep running after a lost packet.

    :param binary: bytes received from the socket
    :return: decoded dict, or an empty history skeleton on failure
    """
    try:
        jsn = ''.join(binary.decode("utf-8"))
        d = json.loads(jsn)
    except (TypeError, json.decoder.JSONDecodeError) as e:
        # BUG FIX: the original compared the exception instance to the
        # class (`e == TypeError`), which is always False, so the wrong
        # diagnostic was printed.  isinstance() is the correct check.
        if isinstance(e, TypeError):
            print("Le message reçu n'est pas du format attendu")
        else:
            print('Un paquet a été perdu')
        return {"actions": [], "message": [], "auth": []}
    return d


class WhiteBoard:
    def __init__(self, client_name, start_config, start_hist=None):
        """
        Whiteboard initialization : we build the GUI using the config file and the potential history of actions
        made by other users. Returns a Whiteboard window ready to use.

        :param client_name: Name of the client who just opened a new whiteboard window (str)
        :param start_config: Whiteboard configuration stored in config.json and loaded as a dict (dict)
        :param start_hist: History of actions by other users (dict)
        """
        pygame.init()
        if not isinstance(client_name, str):
            raise TypeError("Client name must be a string")
        if not isinstance(start_config, dict):
            raise TypeError("Starting configuration file must be a dictionary")
        if start_hist is None:
            start_hist = {"actions": [], "message": [], "auth": []}
        elif not isinstance(start_hist, dict):
            raise TypeError("Starting history file must be a dictionary")
        self._done = False
        self._config = start_config
        self._name = client_name
        self._hist = start_hist
        self.__screen = pygame.display.set_mode([self._config["width"], self._config["length"]])
        self.__screen.fill(self._config["board_background_color"])
        # One event handler per drawing mode; switch_config() selects which
        # one start()/start_local() dispatch to.
        self.__handler = {"line": HandleLine(self), "point": HandlePoint(self), "text": HandleText(self),
                          "rect": HandleRect(self), "circle": HandleCircle(self)}
        # Separator line between the toolbar and the drawing area.
        pygame.draw.line(self.__screen, self._config["active_color"], [0, self._config["toolbar_y"]],
                         [self._config["width"], self._config["toolbar_y"]], 1)
        # We keep track of the position of the last toolbar box created on
        # each side to make sure left and right boxes never overlap.
        last_left_position = 0
        last_right_position = self._config["width"] - self._config["mode_box_size"][0]
        self._erasing_auth = False
        # Auth box: grants other clients the permission to edit this
        # client's text boxes.
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__auth_box = Auth((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__auth_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        # Save box: exports the current drawing as an image.
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__save_box = Save((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__save_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        self.__modes = [Mode("point", (2 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("line", (3 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("text", (4 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("rect", (5 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("circle", (6 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"]))
                        ]
        # If right and left boxes overlap, raise an error and close pygame
        try:
            for mod in self.__modes:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                mod.add(self.__screen)
                last_left_position += self._config["mode_box_size"][0]
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        # Colour palette boxes, laid out from the right edge inwards.
        self.__colors = []
        try:
            for key, value in self._config["color_palette"].items():
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                color_box = ColorBox(value, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__colors.append(color_box)
                color_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        # Pen-size boxes, also laid out from the right edge inwards.
        self.__font_sizes = []
        try:
            for size in self._config["pen_sizes"]:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                font_size_box = FontSizeBox(size, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__font_sizes.append(font_size_box)
                font_size_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        # Drawing state.
        pygame.display.flip()
        self._draw = False
        self._last_pos = None
        self._mouse_position = (0, 0)
        # Text box state.
        self._text_boxes = []  # This list will hold the TextBox objects
        self.active_box = None
        self.load_actions(self._hist)
        self.__modification_allowed = copy.deepcopy(self._hist["auth"])
        # if some client names are in this list, you will have the authorisation to edit their textboxes
        for action in self._hist["actions"]:
            if action["type"] == "Text_box":
                self.append_text_box(TextBox(**action["params"]))

    # ------------------------------------------------------------------
    # Encapsulation: accessors over the private state.
    # ------------------------------------------------------------------
    def is_done(self):
        return self._done

    def end(self):
        self._done = True

    def get_config(self, maplist):
        """
        Getter of config file. Uses a list of keys to traverse the config dict

        :param maplist: list of keys from parent to child to get the wanted value (list)
        :return: value of a key in the config file (object)
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            return reduce(operator.getitem, maplist, self._config)
        except (KeyError, TypeError):
            return None

    def set_config(self, maplist, value):
        """
        Setter of config file. Uses the getter and assigns value to a key

        :param maplist: list of keys from parent to child to get the wanted value (list)
        :param value: value to set (object)
        :return: None if failed
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            self.get_config(maplist[:-1])[maplist[-1]] = value
        except (KeyError, TypeError):
            return None

    def get_hist(self, key=None):
        # With no key, return the whole history dict; otherwise one entry.
        if key is None:
            return self._hist
        else:
            return self._hist[key]

    def add_to_hist(self, value):
        self._hist["actions"].append(value)

    @property
    def screen(self):
        return self.__screen

    def clear_screen(self):
        """
        Clear the screen by coloring it to background color. Does not color the toolbar

        :return:
        """
        self.__screen.fill(self.get_config(["board_background_color"]),
                           (0, self.get_config(["toolbar_y"]) + 1, self.get_config(["width"]),
                            self.get_config(["length"]) - self.get_config(["toolbar_y"]) + 1))

    def is_drawing(self):
        return self._draw

    def pen_up(self):
        self._draw = False

    def pen_down(self):
        self._draw = True

    @property
    def name(self):
        return self._name

    @property
    def modification_allowed(self):
        return self.__modification_allowed

    @property
    def last_pos(self):
        return self._last_pos

    def reset_last_pos(self):
        self._last_pos = None

    def update_last_pos(self):
        self._last_pos = self._mouse_position

    def __get_mouse_position(self):
        return self._mouse_position

    def __set_mouse_position(self, value):
        self._mouse_position = value

    mouse_position = property(__get_mouse_position, __set_mouse_position)

    def get_text_boxes(self):
        return self._text_boxes

    def append_text_box(self, textbox):
        self._text_boxes.append(textbox)

    def del_text_box(self, textbox):
        self._text_boxes.remove(textbox)

    def draw(self, obj, timestamp):
        """
        Method to draw figures defined in figures.py. Also adds drawn objects to history.

        :param obj: class of figure to draw
        :param timestamp: timestamp at which the drawing happens
        :return: None
        """
        # Draw object on screen
        obj.draw(self.__screen)
        # Create dict containing object parameters and right timestamp to add to history
        hist_obj = {"type": obj.type, "timestamp": timestamp, "params": obj.fetch_params(), "client": self._name}
        # Special case if it's a Text_box object, we need to get the correct box id
        if hist_obj["type"] == "Text_box":
            hist_obj["id"] = obj.id_counter
            hist_obj["owner"] = self._name
        self.add_to_hist(hist_obj)

    def switch_config(self, event):
        """
        Switch between different modes

        :param event: Action by the user : a mouse click on either modes, colors or font sizes
        :return: None
        """
        if event == "quit":
            self.set_config(["mode"], "quit")
        # We go through each mode, color and font size to see if that mode should be triggered by the event
        else:
            for mod in self.__modes:
                if mod.is_triggered(event):
                    self.set_config(["mode"], mod.name)
            for col in self.__colors:
                if col.is_triggered(event):
                    self.set_config(["text_box", "text_color"], col.color)
                    self.set_config(["active_color"], col.color)
            for font_size_ in self.__font_sizes:
                if font_size_.is_triggered(event):
                    self.set_config(["font_size"], font_size_.font_size)
            if self.__auth_box.is_triggered(event):
                self._erasing_auth = not self._erasing_auth
                self.__auth_box.switch(self.__screen, self._erasing_auth, self.__modification_allowed, self._name)
                self._hist["auth"] = [self._name, self._erasing_auth]
            if self.__save_box.is_triggered(event):
                self.__save_box.save(self.__screen, self)
                print("Le dessin a été sauvegardé dans le dossier")

    def set_active_box(self, box, new=True):
        """
        A method specific to text boxes : select an existing box or one that has just been created to edit.
        This box is thus said to be "active"

        :param box: instance of the TextBox class
        :param new: boolean to specify if the box was just created or already existed
        :return:
        """
        # If the selected box is already the active one, do nothing
        if box == self.active_box:
            return
        # If there is a box that is active we must turn it into "inactive"
        if self.active_box is not None:
            # Change its color to the "inactive color"
            self.active_box.set_textbox_color(self.get_config(["text_box", "inactive_color"]))
            # Select the id of previous active box
            id_counter = self.active_box.id_counter
            # Find the previous active box and change its color in history
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action["params"]["text"] = self.active_box.get_textbox_text()
                    action['params']["box_color"] = self.get_config(["text_box", "inactive_color"])
            # Render it
            self.active_box.draw(self.__screen)
        # If selected box already exists on the whiteboard we must turn it into "active"
        if not new:
            id_counter = box.id_counter
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action['params']["box_color"] = self.get_config(["text_box", "active_color"])
        # Draw the newly activated box
        self.active_box = box
        self.active_box.draw(self.__screen)
        pygame.display.flip()

    def draw_action(self, action):
        """
        Draw the result of an action by the user on the whiteboard

        :param action: usually a mouse action by the user
        :return:
        """
        if action["type"] == "Point":
            draw_point(action["params"], self.__screen)
        if action["type"] == "Line":
            draw_line(action["params"], self.__screen)
        if action["type"] == "Text_box":
            draw_textbox(action["params"], self.__screen)
        if action["type"] == "rect":
            draw_rect(action["params"], self.__screen)
        if action["type"] == "circle":
            draw_circle(action["params"], self.__screen)

    def load_actions(self, hist):
        """
        Load actions from history

        :param hist: list of dict representing the history of actions in the whiteboard session
        :return:
        """
        # Sort actions chronologically
        sred = sorted(hist["actions"], key=lambda value: value["timestamp"])
        # Go through each action and draw it
        for action in sred:
            self.draw_action(action)
        pygame.display.flip()

    def start(self, connexion_avec_serveur):
        """
        Start and run a whiteboard window connected to a server.

        :param connexion_avec_serveur: socket to connect with server (socket.socket)
        :return:
        """
        # Initialize timestamp
        last_timestamp_sent = 0
        # BUG FIX: pre-initialize the outgoing message so the END message
        # after the loop cannot hit an UnboundLocalError if the loop body
        # never runs.
        message_a_envoyer = {"message": "", "actions": [], "auth": []}
        while not self.is_done():
            # Browse all events done by user
            for event in pygame.event.get():
                # If user closes the window, quit the whiteboard
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                # Use specific handling method for current drawing mode
                self.__handler[self.get_config(["mode"])].handle_all(event)
            # Send dict history to server
            if self._hist["auth"] != [self._name, self._erasing_auth]:
                self._hist["auth"] = []
            new_modifs = [modif for modif in self.get_hist()["actions"] if
                          (modif["timestamp"] > last_timestamp_sent and self._name == modif["client"])]
            message_a_envoyer = {"message": "", 'actions': new_modifs, "auth": self._hist["auth"]}
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
            self._hist["auth"] = []
            # Update last timestamp sent
            if new_modifs:
                last_timestamp_sent = max([modif["timestamp"] for modif in new_modifs])
            # Dict received from server
            try:
                new_hist = binary_to_dict(connexion_avec_serveur.recv(2 ** 24))
            except (ConnectionResetError, ConnectionAbortedError):
                print("Le serveur a été éteint, veuillez le relancer")
                self._done = True
                # BUG FIX: the original fell through with `pass` and then
                # dereferenced the unbound `new_hist`, crashing with a
                # NameError instead of shutting down cleanly.
                break
            # Consider actions made by another client after new_last_timestamp
            new_actions = [action for action in new_hist["actions"] if action["client"] != self._name]
            for action in new_actions:
                # Two cases: a new figure (point, line, rect, circle, new text
                # box) was created, or an existing text box was modified.  For
                # the second case we use the variable "matched" as indicator.
                matched = False
                if action["type"] == "Text_box":
                    # Find the text box id
                    for textbox in [x for x in self._hist["actions"] if x["type"] == "Text_box"]:
                        if action["id"] == textbox["id"]:
                            # Modify it with the newly acquired parameters from server
                            textbox["params"]["text"], textbox["params"]["w"] = action["params"]["text"], \
                                action["params"]["w"]
                            action_to_update_textbox = action
                            for element in self.get_text_boxes():
                                if element.id_counter == action["id"]:
                                    self.del_text_box(element)
                                    self.append_text_box(TextBox(**action_to_update_textbox["params"]))
                            # Draw the modified text box with updated parameters
                            self.clear_screen()
                            self.load_actions(self._hist)
                            matched = True
                # If we are in the first case, we add the new actions to history and draw them
                if not matched:
                    self.add_to_hist(action)
                    if action["type"] == "Text_box":
                        self.append_text_box(TextBox(**action["params"]))
                    self.draw_action(action)
            if self._name in new_hist["auth"]:
                new_hist["auth"].remove(self._name)
            if new_hist["auth"] != self.__modification_allowed:
                self.__modification_allowed = copy.deepcopy(new_hist["auth"])
            pygame.display.flip()
        # Once we are done, we quit pygame and send end message
        pygame.quit()
        print("Fermeture de la connexion")
        message_a_envoyer["message"] = "END"
        try:
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
        except (ConnectionResetError, BrokenPipeError):
            print("Il n'y a pas de message à envoyer au serveur")
        connexion_avec_serveur.close()

    def start_local(self):
        """
        Starts Whiteboard locally. Used to test stuff and debug.

        :return:
        """
        while not self.is_done():
            for event in pygame.event.get():
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                self.__handler[self.get_config(["mode"])].handle_all(event)
            pygame.display.flip()
        pygame.quit()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
pyfaddist/yafcorse
refs/heads/main
{"/tests/conftest.py": ["/src/yafcorse/__init__.py"], "/tests/test_ceate_extensions.py": ["/src/yafcorse/__init__.py"], "/tests/test_origins_function.py": ["/src/yafcorse/__init__.py"]}
└── ├── src │ └── yafcorse │ └── __init__.py └── tests ├── conftest.py ├── test_ceate_extensions.py ├── test_default_configuration.py ├── test_origins_function.py ├── test_preflight_request.py └── test_simple_request.py
/src/yafcorse/__init__.py
import re
from typing import Callable, Iterable

from flask import Flask, Response, request

# Yet Another Flask CORS Extension
# --------------------------------
# Based on https://developer.mozilla.org/de/docs/Web/HTTP/CORS

DEFAULT_CONFIGURATION = {
    'origins': None,
    'allowed_methods': [],
    'allowed_headers': None,
    'allow_credentials': False,
    'cache_max_age': None
}


class Yafcorse(object):
    """Flask extension that appends CORS headers to matching responses.

    Origin validation strategy is picked in init_app() from the
    configuration: explicit list/iterable of origins, a callable
    predicate, a list of regex patterns, or the '*' wildcard.
    """

    def __init__(self, configuration: dict = None, app: Flask = None) -> None:
        super().__init__()
        # BUG FIX: the default argument used to be the module-level
        # DEFAULT_CONFIGURATION dict itself — a shared mutable default.
        # Use None as the sentinel and fall back explicitly instead.
        if configuration is None:
            configuration = DEFAULT_CONFIGURATION
        self.__initialized = False
        self.__origins = configuration.get('origins', DEFAULT_CONFIGURATION.get('origins'))
        self.__regex_origin_patterns = configuration.get('origin_patterns', None)
        self.__allowed_methods = configuration.get('allowed_methods', DEFAULT_CONFIGURATION.get('allowed_methods'))
        self.__allowed_headers = configuration.get('allowed_headers', DEFAULT_CONFIGURATION.get('allowed_headers'))
        self.__allow_credentials = configuration.get('allow_credentials',
                                                     DEFAULT_CONFIGURATION.get('allow_credentials'))
        self.__max_age = configuration.get('cache_max_age', DEFAULT_CONFIGURATION.get('cache_max_age'))
        # Pre-joined header values, filled in by init_app().
        self.__allowed_methods_value = ''
        self.__allowed_headers_value = ''
        self.init_app(app)

    def init_app(self, app: Flask):
        """Register the after-request hook on *app*.  Idempotent; a no-op
        when *app* is None (two-phase initialization)."""
        if not self.__initialized and app:
            # BUG FIX: guard against None so the default configuration
            # (allowed_headers=None / origins=None) no longer crashes on
            # ', '.join(None).
            methods = self.__allowed_methods or []
            headers = self.__allowed_headers or []
            self.__allowed_methods_value = ', '.join(methods)
            self.__allowed_methods = [m.strip().lower() for m in methods]
            self.__allowed_headers_value = ', '.join(headers)
            self.__allowed_headers = [h.strip().lower() for h in headers]

            # Keep the original dispatch order: iterable of origins first
            # (strings excluded), then callable predicate, then regex
            # patterns, finally the '*' wildcard check.
            if not isinstance(self.__origins, str) and isinstance(self.__origins, (list, tuple, Iterable)):
                self.__validate_origin = _check_if_contains_origin(self.__origins)
            elif isinstance(self.__origins, Callable):
                self.__validate_origin = self.__origins
            elif self.__regex_origin_patterns is not None:
                self.__validate_origin = _check_if_regex_match_origin(self.__regex_origin_patterns)
            else:
                self.__validate_origin = _check_if_asterisk_origin(self.__origins)

            app.after_request(self.__handle_response)
            app.extensions['yafcorse'] = self
            self.__initialized = True

    def __append_headers(self, response: Response, origin: str, is_preflight_request: bool = False):
        """Append the Access-Control-* headers for an accepted *origin*."""
        response.headers.add_header('Access-Control-Allow-Origin', origin)
        if 'Access-Control-Request-Method' in request.headers \
                and request.headers.get('Access-Control-Request-Method', '').strip().lower() in self.__allowed_methods:
            response.headers.add_header('Access-Control-Allow-Methods', self.__allowed_methods_value)
        if 'Access-Control-Request-Headers' in request.headers \
                and _string_list_in(request.headers.get('Access-Control-Request-Headers').split(','),
                                    self.__allowed_headers):
            response.headers.add_header('Access-Control-Allow-Headers', self.__allowed_headers_value)
        if self.__allow_credentials:
            response.headers.add_header('Access-Control-Allow-Credentials', 'true')
        # BUG FIX: only emit Max-Age when a value is configured; the
        # original passed None straight to add_header().
        if is_preflight_request and self.__max_age is not None:
            response.headers.add_header('Access-Control-Max-Age', self.__max_age)

    def __handle_response(self, response: Response):
        """after_request hook: decorate the response when the request
        carries an acceptable Origin header."""
        is_preflight_request = request.method == 'OPTIONS'
        if not is_preflight_request and 'Origin' not in request.headers:
            return response

        origin = request.headers.get('Origin')
        if not self.__validate_origin(origin):
            return response

        self.__append_headers(response, origin, is_preflight_request)
        return response


def _string_list_in(target: list[str], source: list[str]):
    """True when every element of *target* (case/space-insensitive) is in *source*."""
    contained = [element for element in target if element.strip().lower() in source]
    return contained == target


def _check_if_regex_match_origin(patterns):
    """Build a validator that accepts origins matching any regex pattern."""
    compiled_patterns = [re.compile(p) for p in patterns]

    def execute_check(origin):
        for matcher in compiled_patterns:
            if matcher.match(origin):
                return True
        return False

    execute_check.__name__ = _check_if_regex_match_origin.__name__
    return execute_check


def _check_if_contains_origin(origins):
    """Build a validator that accepts origins listed in *origins*."""

    def execute_check(origin):
        for o in origins:
            if o == origin:
                return True
        return False

    execute_check.__name__ = _check_if_contains_origin.__name__
    return execute_check


def _check_if_asterisk_origin(origins):
    """Build a validator for the '*' wildcard: accept any non-None origin."""
    allow_all = origins == '*'

    def execute_check(origin):
        return allow_all and origin is not None

    execute_check.__name__ = _check_if_asterisk_origin.__name__
    return execute_check
/tests/conftest.py
import pytest
from flask import Flask
from yafcorse import Yafcorse

# Shared CORS configuration used by the application fixture below.
_CORS_SETTINGS = {
    'origins': '*',
    'allowed_methods': ['GET', 'POST', 'PUT'],
    'allowed_headers': ['Content-Type', 'X-Test-Header'],
    'allow_credentials': True,
    'cache_max_age': str(60 * 5)
}


@pytest.fixture()
def app():
    """Flask application with the Yafcorse extension installed."""
    application = Flask(__name__)
    extension = Yafcorse(_CORS_SETTINGS)
    extension.init_app(application)
    return application


@pytest.fixture()
def client(app: Flask):
    """Test client bound to the shared application fixture."""
    return app.test_client()
/tests/test_ceate_extensions.py
from flask.app import Flask
from yafcorse import Yafcorse


def test_extension(app: Flask):
    """init_app() must register the extension instance under 'yafcorse'."""
    registered = app.extensions.get('yafcorse')
    assert registered is not None
    assert isinstance(registered, Yafcorse)
/tests/test_default_configuration.py
# def test_no_cors_enabled(): # assert False
/tests/test_origins_function.py
import pytest
from flask import Flask, Response
from flask.testing import FlaskClient
from yafcorse import Yafcorse


@pytest.fixture()
def local_app():
    """App whose origins are validated by a lambda predicate."""
    application = Flask(__name__)
    extension = Yafcorse({
        'allowed_methods': ['GET', 'POST', 'PUT'],
        'allowed_headers': ['Content-Type', 'X-Test-Header'],
        'origins': lambda origin: origin == 'https://from_lambda'
    })
    extension.init_app(application)
    return application


@pytest.fixture()
def local_client(local_app: Flask):
    return local_app.test_client()


def test_origin_function(local_client: FlaskClient):
    """An origin accepted by the lambda receives the CORS headers."""
    response: Response = local_client.options('/some-request', headers={
        'Origin': 'https://from_lambda'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in response.headers
    assert 'Access-Control-Max-Age'.lower() in response.headers
    allow_origin = response.headers.get('Access-Control-Allow-Origin')
    max_age = response.headers.get('Access-Control-Max-Age')
    assert allow_origin is not None
    assert allow_origin == 'https://from_lambda'
    assert max_age is not None
    assert max_age != ''


def test_origin_function_fail(local_client: FlaskClient):
    """An origin rejected by the lambda gets no CORS headers at all."""
    response: Response = local_client.options('/some-request', headers={
        'Origin': 'https://other_than_lambda'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() not in response.headers
    assert 'Access-Control-Max-Age'.lower() not in response.headers
/tests/test_preflight_request.py
from flask import Response
from flask.testing import FlaskClient


def _preflight(client: FlaskClient, headers: dict) -> Response:
    """Issue an OPTIONS (preflight) request with the given CORS headers."""
    return client.options('/some-request', headers=headers)


def test_with_origin(client: FlaskClient):
    """A preflight with a valid Origin gets Allow-Origin and Max-Age."""
    response = _preflight(client, {'Origin': 'https://test.org'})
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in response.headers
    assert 'Access-Control-Max-Age'.lower() in response.headers
    allow_origin = response.headers.get('Access-Control-Allow-Origin')
    max_age = response.headers.get('Access-Control-Max-Age')
    assert allow_origin is not None
    assert allow_origin == 'https://test.org'
    assert max_age is not None
    assert max_age != ''


def test_without_origin(client: FlaskClient):
    """A preflight without an Origin header gets no CORS headers."""
    response = _preflight(client, {})
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() not in response.headers
    assert 'Access-Control-Max-Age'.lower() not in response.headers
    assert 'Access-Control-Allow-Methods'.lower() not in response.headers
    assert 'Access-Control-Allow-Headers'.lower() not in response.headers


def test_allow_method(client: FlaskClient):
    """A configured request method is echoed in Allow-Methods."""
    response = _preflight(client, {
        'Access-Control-Request-Method': 'POST',
        'Origin': 'https://test.org'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Methods'.lower() in response.headers
    assert 'POST' in response.headers.get('Access-Control-Allow-Methods')
    assert 'Access-Control-Max-Age'.lower() in response.headers
    assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
    assert 'Access-Control-Allow-Headers'.lower() not in response.headers


def test_dont_allow_method(client: FlaskClient):
    """A method outside the configuration gets no Allow-Methods header."""
    response = _preflight(client, {
        'Access-Control-Request-Method': 'PATCH',
        'Origin': 'https://test.org'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Methods'.lower() not in response.headers
    assert 'Access-Control-Max-Age'.lower() in response.headers
    assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
    assert 'Access-Control-Allow-Headers'.lower() not in response.headers


def test_allow_headers(client: FlaskClient):
    """Configured request headers are echoed in Allow-Headers."""
    response = _preflight(client, {
        'Access-Control-Request-Headers': 'Content-Type, X-Test-Header',
        'Origin': 'https://test.org'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Headers'.lower() in response.headers
    allowed = response.headers.get('Access-Control-Allow-Headers')
    assert 'Content-Type' in allowed
    assert 'X-Test-Header' in allowed
    assert 'Access-Control-Max-Age'.lower() in response.headers
    assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
    assert 'Access-Control-Allow-Methods'.lower() not in response.headers


def test_dont_allow_headers(client: FlaskClient):
    """One unconfigured header disables the Allow-Headers echo entirely."""
    response = _preflight(client, {
        'Access-Control-Request-Headers': 'Content-Type, X-Test-Header, X-Not-Allowed',
        'Origin': 'https://test.org'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Headers'.lower() not in response.headers
    assert 'Access-Control-Max-Age'.lower() in response.headers
    assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
    assert 'Access-Control-Allow-Methods'.lower() not in response.headers
/tests/test_simple_request.py
from flask import Flask, Response
from flask.testing import FlaskClient


def test_simple_request(client: FlaskClient):
    """A plain GET with an Origin gets Allow-Origin but no preflight headers."""
    response: Response = client.get('/some-request', headers={
        'Origin': 'https://test.org'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in response.headers
    assert 'Access-Control-Max-Age'.lower() not in response.headers
    allow_origin = response.headers.get('Access-Control-Allow-Origin')
    assert allow_origin is not None
    assert allow_origin == 'https://test.org'
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
ericfourrier/auto-clean
refs/heads/develop
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/outliersdetection.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/naimputer.py": ["/autoc/utils/helpers.py", "/autoc/utils/corrplot.py", "/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/utils/helpers.py", "/autoc/explorer.py", "/autoc/exceptions.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py", "/autoc/exceptions.py"]}
└── ├── autoc │ ├── __init__.py │ ├── exceptions.py │ ├── explorer.py │ ├── naimputer.py │ ├── outliersdetection.py │ ├── preprocess.py │ └── utils │ ├── corrplot.py │ ├── getdata.py │ └── helpers.py ├── setup.py └── test.py
/autoc/__init__.py
"""Top-level package: re-exports the public classes of auto-clean."""

__all__ = ["explorer", "naimputer"]

# Public API re-exports so users can do `from autoc import DataExploration`.
from .explorer import DataExploration
from .naimputer import NaImputer
from .preprocess import PreProcessor
from .utils.getdata import get_dataset
# from .preprocess import PreProcessor
/autoc/exceptions.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier

Purpose : File with all custom exceptions
"""


class NotNumericColumn(Exception):
    """Raised when a column that is expected to be numeric is not."""
    pass


class NumericError(Exception):
    """Raised when a column that should not be numeric is numeric."""
    pass

# class NotFactor
/autoc/explorer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier

Purpose : This is a framework for Modeling with pandas, numpy and scikit-learn.
The goal of this module is to rely on a dataframe structure for modelling.
"""

#########################################################
# Import modules and global helpers
#########################################################

import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint

from .exceptions import NotNumericColumn


class DataExploration(object):
    """
    This class is designed to provide infos about the dataset such as
    number of missing values, number of unique values, constant columns,
    long strings ...

    For the most useful methods the result is stored in an instance
    attribute so it is not computed twice.
    """

    def __init__(self, data, copy=False):
        """
        Parameters
        ----------
        data : pandas.DataFrame
            the data you want explore
        copy: bool
            True if you want make a copy of DataFrame, default False

        Examples
        --------
        explorer = DataExploration(data = your_DataFrame)
        explorer.structure() : global structure of your DataFrame
        explorer.psummary() to get a global snapshot of the different stuff detected
        data_cleaned = explorer.basic_cleaning() to clean your data.
        """
        assert isinstance(data, pd.DataFrame)
        self.is_data_copy = copy
        self.data = data if not self.is_data_copy else data.copy()
        self._nrow = len(self.data.index)
        self._ncol = len(self.data.columns)
        # boolean mask / name list of numeric columns
        self._dfnumi = (self.data.dtypes == float) | (self.data.dtypes == int)
        self._dfnum = cserie(self._dfnumi)
        # boolean mask / name list of object (character) columns
        self._dfchari = (self.data.dtypes == object)
        self._dfchar = cserie(self._dfchari)
        # caches for the expensive computations below
        self._nacolcount = pd.DataFrame()
        self._narowcount = pd.DataFrame()
        self._count_unique = pd.DataFrame()
        self._constantcol = []
        self._dupcol = []
        self._nearzerovar = pd.DataFrame()
        self._corrcolumns = []
        self._dict_info = {}
        self._structure = pd.DataFrame()
        self._string_info = ""
        # lowercase tokens treated as alternative encodings of "missing"
        self._list_other_na = {'unknown', 'na',
                               'missing', 'n/a', 'not available'}

    def is_numeric(self, colname):
        """
        Returns True if the type of the column is numeric else False

        Parameters
        ----------
        colname : str
            the name of the column of the self.data

        Notes
        ------
        df._get_numeric_data() is a primitive from pandas
        to get only numeric data
        """
        dtype_col = self.data.loc[:, colname].dtype
        return (dtype_col == int) or (dtype_col == float)

    def is_int_factor(self, colname, threshold=0.1):
        """
        Returns True if the column is an integer column with few unique
        values (an "int factor"), else False

        Parameters
        ----------
        colname : str
            the name of the column of the self.data
        threshold : float
            colname is an 'int_factor' if the number of
            unique values < threshold * nrows
        """
        dtype_col = self.data.loc[:, colname].dtype
        if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
            return True
        else:
            return False

    def to_lowercase(self):
        """ Returns a copy of dataset with string data lowercased """
        return self.data.applymap(lambda x: x.lower() if type(x) == str else x)

    def where_numeric(self):
        """ Returns a boolean DataFrame: True for numeric values, False otherwise """
        return self.data.applymap(lambda x: isinstance(x, (int, float)))

    def count_unique(self):
        """ Return a serie with the number of unique value per columns """
        if len(self._count_unique):
            return self._count_unique
        self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
        return self._count_unique

    def sample_df(self, pct=0.05, nr=10, threshold=None):
        """ sample a number of rows of a dataframe = min(max(pct*nrow, nr), threshold) """
        a = max(int(pct * float(len(self.data.index))), nr)
        if threshold:
            a = min(a, threshold)
        return self.data.loc[permutation(self.data.index)[:a], :]

    def sign_summary(self, subset=None):
        """
        Returns the number and percentage of positive and negative values in
        a column, a subset of columns or all numeric columns of the dataframe.

        Parameters
        ----------
        subset : label or list
            Column name or list of column names to check.

        Returns
        -------
        summary : pandas.DataFrame
            Summary of the signs present in the subset

        Raises
        ------
        NotNumericColumn
            If at least one of the requested columns is not numeric.
        """
        if subset:
            # fix: the original read the unassigned local `subs` here,
            # raising NameError whenever a subset was passed
            subs = subset if isinstance(subset, list) else [subset]
            if sum(col not in self._dfnum for col in subs) > 0:
                raise NotNumericColumn('At least one of the columns you passed '
                                       'as argument are not numeric.')
        else:
            subs = self._dfnum

        summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
                                        'NumOfPositive', 'PctOfPositive'])
        summary['NumOfPositive'] = self.data[subs].apply(
            lambda x: (x >= 0).sum(), axis=0)
        summary['NumOfNegative'] = self.data[subs].apply(
            lambda x: (x <= 0).sum(), axis=0)
        summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
        summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
        return summary

    @property
    def total_missing(self):
        """ Count the total number of missing values """
        # np.count_nonzero(self.data.isnull().values) is an alternative
        # optimized for speed
        return self.nacolcount().Nanumber.sum()

    def nacolcount(self):
        """ count the number of missing values per column """
        if len(self._nacolcount):
            return self._nacolcount
        self._nacolcount = self.data.isnull().sum(axis=0)
        self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
        self._nacolcount['Napercentage'] = self._nacolcount[
            'Nanumber'] / (self._nrow)
        return self._nacolcount

    def narowcount(self):
        """ count the number of missing values per row """
        if len(self._narowcount):
            return self._narowcount
        self._narowcount = self.data.isnull().sum(axis=1)
        self._narowcount = pd.DataFrame(
            self._narowcount, columns=['Nanumber'])
        self._narowcount['Napercentage'] = self._narowcount[
            'Nanumber'] / (self._ncol)
        return self._narowcount

    def detect_other_na(self, verbose=True, auto_replace=False):
        """ Detect missing values encoded by the creator of the dataset
        like 'Missing', 'N/A' ...

        Parameters
        ----------
        verbose : bool
            True if you want to print some infos
        auto_replace: bool
            True if you want replace this value by np.nan, default False

        Returns
        -------
        a DataFrame of booleans if not auto_replace else cleaned DataFrame with
        self._list_other_na values replaced by np.nan

        Notes
        ------
        * You can use na_values parameter in pandas.read_csv to specify
          the missing values to convert to nan a priori
        * Speed can be improved
        """
        res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
        if verbose:
            # fix: the message was printed even when verbose=False
            print("We detected {} other type of missing values".format(
                res.sum().sum()))
        if auto_replace:
            return self.data.where((res == False), np.nan)
        else:
            return res

    @property
    def nacols_full(self):
        """ Returns a list of columns with only missing values """
        return cserie(self.nacolcount().Nanumber == self._nrow)

    @property
    def narows_full(self):
        """ Returns an index of rows with only missing values """
        return self.narowcount().Nanumber == self._ncol

    def manymissing(self, pct=0.9, axis=0):
        """ identify columns (axis=0) or rows (axis=1) of a dataframe
        with many missing values ( >= pct )

        Returns a list of column names for axis=0 and a boolean Series
        indexed by row for axis=1.
        """
        if axis == 1:
            self._manymissingrow = self.narowcount()
            self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
            return self._manymissingrow
        elif axis == 0:
            self._manymissingcol = self.nacolcount()
            self._manymissingcol = cserie(
                self._manymissingcol['Napercentage'] >= pct)
            return self._manymissingcol
        else:
            # fix: typo 'o' -> '0' in the error message
            raise ValueError("Axis should be 1 for rows and 0 for columns")

    def df_len_string(self, drop_num=False):
        """ Return a Series with the max length of the strings of
        string-type columns (NaN for numeric columns unless drop_num) """
        if drop_num:
            return self.data.drop(self._dfnum, axis=1).apply(
                lambda x: np.max(x.str.len()), axis=0)
        else:
            return self.data.apply(lambda x: np.max(x.str.len())
                                   if x.dtype.kind == 'O' else np.nan, axis=0)

    def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
        """ identify id or key columns as an index if index_format = True or
        as a list if index_format = False

        A sample of the data is scanned first; candidates are then
        re-checked on the full data.
        """
        if not dropna:
            col_to_keep = self.sample_df(
                pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
            if len(col_to_keep) == 0:
                return []
            is_key_index = col_to_keep
            # fix: the original used `==` (a no-op comparison) instead of `=`,
            # so candidates found on the sample were never re-checked
            is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
                lambda x: len(x.unique()) == len(x), axis=0)
            if index_format:
                return is_key_index
            else:
                return cserie(is_key_index)
        else:
            col_to_keep = self.sample_df(
                pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
            if len(col_to_keep) == 0:
                return []
            is_key_index = col_to_keep
            # fix: same `==` / `=` confusion as above
            is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
                lambda x: x.nunique() == len(x.dropna()), axis=0)
            if index_format:
                return is_key_index
            else:
                return cserie(is_key_index)

    def constantcol(self, **kwargs):
        """ identify constant columns """
        # sample to reduce computation time
        if len(self._constantcol):
            return self._constantcol
        col_to_keep = self.sample_df(
            **kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
        if len(cserie(col_to_keep)) == 0:
            return []
        self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
            lambda x: len(x.unique()) == 1, axis=0))
        return self._constantcol

    def constantcol2(self, **kwargs):
        """ identify constant columns (vectorised variant) """
        # fix: .ix was removed from pandas; .iloc[0] keeps the behaviour
        return cserie((self.data == self.data.iloc[0]).all())

    def factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """ return a list of the detected factor variable, detection is based on
        ther percentage of unicity perc_unique = 0.05 by default.
        We follow here the definition of R factors variable considering that a
        factor variable is a character variable that take value in a list a levels

        this is a bad implementation

        Arguments
        ----------
        nb_max_levels: the mac nb of levels you fix for a categorical variable
        threshold_value : the nb of of unique value in percentage of the dataframe length
        index : if you want the result as an index or a list
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels

        def helper_factor(x, num_var=self._dfnum):
            # early-exit scan: stop as soon as too many levels are seen
            unique_value = set()
            if x.name in num_var:
                return False
            else:
                for e in x.values:
                    if len(unique_value) >= max_levels:
                        return False
                    else:
                        unique_value.add(e)
                return True

        if index:
            return self.data.apply(lambda x: helper_factor(x))
        else:
            return cserie(self.data.apply(lambda x: helper_factor(x)))

    @staticmethod
    def serie_quantiles(array, nb_quantiles=10):
        """ Return nb_quantiles + 1 quantiles of a pandas Series or numpy array """
        binq = 1.0 / nb_quantiles
        if type(array) == pd.Series:
            # fix: xrange is Python 2 only
            return array.quantile([binq * i for i in range(nb_quantiles + 1)])
        elif type(array) == np.ndarray:
            return np.percentile(array, [binq * i for i in range(nb_quantiles + 1)])
        else:
            # fix: `raise("...")` raised a plain string, itself a TypeError
            raise TypeError("the type of your array is not supported")

    def dfquantiles(self, nb_quantiles=10, only_numeric=True):
        """ this function gives you all the quantiles
        of the numeric variables of the dataframe
        only_numeric will calculate it only for numeric variables,
        for only_numeric = False you will get NaN value for non numeric
        variables """
        binq = 1.0 / nb_quantiles
        # fix: xrange is Python 2 only
        if only_numeric:
            return self.data.loc[:, self._dfnumi].quantile(
                [binq * i for i in range(nb_quantiles + 1)])
        else:
            return self.data.quantile([binq * i for i in range(nb_quantiles + 1)])

    def numeric_summary(self):
        """ provide a more complete summary than describe, it is using only
        numeric values """
        df = self.data.loc[:, self._dfnumi]
        func_list = [df.count(), df.min(), df.quantile(0.25),
                     df.quantile(0.5), df.mean(),
                     df.std(), df.mad(), df.skew(),
                     df.kurt(), df.quantile(0.75), df.max()]
        results = [f for f in func_list]
        return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
                                            'Median', 'Mean', 'Std', 'Mad', 'Skewness',
                                            'Kurtosis', 'Thirdquartile', 'Max']).T

    def infer_types(self):
        """ this function will try to infer the type of the columns of data """
        # NOTE(review): pd.lib.infer_dtype was moved to
        # pd.api.types.infer_dtype in newer pandas — confirm target version
        return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))

    def structure(self, threshold_factor=10):
        """ this function returns a summary of the structure of the pandas DataFrame
        data looking at the type of variables, the number of missing values, the
        number of unique values """
        if len(self._structure):
            return self._structure
        dtypes = self.data.dtypes
        nacolcount = self.nacolcount()
        nb_missing = nacolcount.Nanumber
        perc_missing = nacolcount.Napercentage
        nb_unique_values = self.count_unique()
        dtype_infer = self.infer_types()
        # "R-like" dtypes: numeric / factor / character
        dtypes_r = self.data.apply(lambda x: "character")
        dtypes_r[self._dfnumi] = "numeric"
        dtypes_r[(dtypes_r == 'character') & (
            nb_unique_values <= threshold_factor)] = 'factor'
        constant_columns = (nb_unique_values == 1)
        na_columns = (perc_missing == 1)
        is_key = nb_unique_values == self._nrow
        string_length = self.df_len_string(drop_num=False)
        dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                    'nb_missing': nb_missing, 'is_key': is_key,
                    'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                    'constant_columns': constant_columns, 'na_columns': na_columns,
                    'dtype_infer': dtype_infer, 'string_length': string_length}
        self._structure = pd.concat(dict_str, axis=1)
        self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r',
                                                  'nb_missing', 'perc_missing',
                                                  'nb_unique_values',
                                                  'constant_columns',
                                                  'na_columns', 'is_key',
                                                  'dtype_infer',
                                                  'string_length']]
        return self._structure

    def findupcol(self, threshold=100, **kwargs):
        """ find duplicated columns and return the result as a list of lists

        A sample (capped at `threshold` rows) is used to pre-filter
        candidates before the full comparison.
        """
        # fix: the threshold argument was ignored (hard-coded 100)
        df_s = self.sample_df(threshold=threshold, **kwargs).T
        dup_index_s = (df_s.duplicated()) | (df_s.duplicated(keep='last'))
        if len(cserie(dup_index_s)) == 0:
            return []

        df_t = (self.data.loc[:, dup_index_s]).T
        dup_index = df_t.duplicated()
        dup_index_complet = cserie(
            (dup_index) | (df_t.duplicated(keep='last')))

        l = []
        for col in cserie(dup_index):
            index_temp = self.data[dup_index_complet].apply(
                lambda x: (x == self.data[col])).sum() == self._nrow
            temp = list(self.data[dup_index_complet].columns[index_temp])
            l.append(temp)
        self._dupcol = l
        return self._dupcol

    def finduprow(self, subset=[]):
        """ find duplicated rows and return the result as a sorted dataframe of all
        the duplicates; subset is a list of columns to look for duplicates from
        this specific subset . """
        if sum(self.data.duplicated()) == 0:
            print("there is no duplicated rows")
        else:
            if subset:
                dup_index = (self.data.duplicated(subset=subset)) | (
                    self.data.duplicated(subset=subset, keep='last'))
            else:
                dup_index = (self.data.duplicated()) | (
                    self.data.duplicated(keep='last'))
            # NOTE(review): DataFrame.sort was removed in pandas 0.20
            # (sort_values) — confirm target pandas version
            if subset:
                return self.data[dup_index].sort(subset)
            else:
                return self.data[dup_index].sort(self.data.columns[0])

    def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
        """ identify predictors with near-zero variance.
        freq_cut: cutoff ratio of frequency of most common value to second
        most common value.
        unique_cut: cutoff percentage of unique value over total number of
        samples.
        save_metrics: if False, print dataframe and return NON near-zero var
        col indexes, if True, returns the whole dataframe.
        """
        nb_unique_values = self.count_unique()
        percent_unique = 100 * nb_unique_values / self._nrow

        def helper_freq(x):
            # frequency ratio of the two most common values
            if nb_unique_values[x.name] == 0:
                return 0.0
            elif nb_unique_values[x.name] == 1:
                return 1.0
            else:
                return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]

        freq_ratio = self.data.apply(helper_freq)

        zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
        nzv = ((freq_ratio >= freq_cut) & (
            percent_unique <= unique_cut)) | (zerovar)

        if save_metrics:
            return pd.DataFrame({'percent_unique': percent_unique,
                                 'freq_ratio': freq_ratio,
                                 'zero_var': zerovar, 'nzv': nzv},
                                index=self.data.columns)
        else:
            print(pd.DataFrame({'percent_unique': percent_unique,
                                'freq_ratio': freq_ratio,
                                'zero_var': zerovar, 'nzv': nzv},
                               index=self.data.columns))
            return nzv[nzv == True].index

    def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
        """
        implementation of the Recursive Pairwise Elimination.
        The function finds the highest correlated pair and removes the most
        highly correlated feature of the pair, then repeats the process
        until the threshold 'cutoff' is reached.

        will return a dataframe is 'data_frame' is set to True, and the list
        of predictors to remove otherwise

        Adaptation of 'findCorrelation' function in the caret package in R.
        """
        res = []
        df = self.data.copy(0)
        cor = df.corr(method=method)
        for col in cor.columns:
            cor[col][col] = 0  # ignore self-correlation

        max_cor = cor.max()
        if print_mode:
            print(max_cor.max())
        while max_cor.max() > cutoff:
            A = max_cor.idxmax()
            B = cor[A].idxmax()
            # drop the member of the pair with the higher mean correlation
            if cor[A].mean() > cor[B].mean():
                cor.drop(A, 1, inplace=True)
                cor.drop(A, 0, inplace=True)
                res += [A]
            else:
                cor.drop(B, 1, inplace=True)
                cor.drop(B, 0, inplace=True)
                res += [B]
            max_cor = cor.max()
            if print_mode:
                print(max_cor.max())
        # fix: this assignment was placed after the return and never executed
        self._corrcolumns = res
        if data_frame:
            return df.drop(res, 1)
        else:
            return res

    def get_infos_consistency(self):
        """ Update self._dict_info and returns infos about duplicated rows and
        cols, constant cols, full-na rows and cols """
        # hoisted: each of these was computed twice (once for 'value',
        # once for the formatted comment)
        dup_columns = self.findupcol()
        constant_columns = self.constantcol()
        nacols_full = self.nacols_full

        infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True),
                                     'level': 'ERROR', 'action': 'delete',
                                     'comment': 'You should delete this rows with df.drop_duplicates()'},
                 'dup_columns': {'value': dup_columns, 'level': 'ERROR',
                                 'action': 'delete',
                                 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(dup_columns)},
                 'constant_columns': {'value': constant_columns, 'level': 'WARNING',
                                      'action': 'delete',
                                      'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(constant_columns)},
                 'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                                 'action': 'delete',
                                 'comment': 'You should delete this rows with df.drop_duplicates()'},
                 'nacols_full': {'value': nacols_full, 'level': 'ERROR',
                                 'action': 'delete',
                                 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(nacols_full)}
                 }
        # update
        self._dict_info.update(infos)
        return infos

    def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
        """ Update self._dict_info and returns infos about missing values:
        total count/percentage, columns with many or few missing values """
        nacolcount_p = self.nacolcount().Napercentage
        infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
                 'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
                 'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
                 'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
                 }
        # update
        self._dict_info.update(infos)
        return infos

    def print_infos(self, infos="consistency", print_empty=False):
        """ pprint of get_infos

        Parameters
        ----------
        print_empty: bool:
            False if you don't want print the empty infos
            (no missing column for example)"""
        if infos == "consistency":
            dict_infos = self.get_infos_consistency()
        if not print_empty:
            dict_infos = {k: v for k, v in dict_infos.items()
                          if len(v['value']) > 0}
        pprint(dict_infos)

    def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
                 threshold=100, string_threshold=40, dynamic=False):
        """
        This function will print you a summary of the dataset, based on
        functions designed in this package

        - Output : python print
        It will store the string output and the dictionnary of results in
        private variables
        """
        nacolcount_p = self.nacolcount().Napercentage
        if dynamic:
            print('there are {0} duplicated rows\n'.format(
                self.data.duplicated().sum()))
            print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                          cserie((nacolcount_p > manymissing_ph))))
            print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
                manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
            print('the detected keys of the dataset are:\n{0} \n'.format(
                self.detectkey()))
            print('the duplicated columns of the dataset are:\n{0}\n'.format(
                self.findupcol(threshold=100)))
            print('the constant columns of the dataset are:\n{0}\n'.format(
                self.constantcol()))
            print('the columns with nearzerovariance are:\n{0}\n'.format(
                list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
            print('the columns highly correlated to others to remove are:\n{0}\n'.format(
                self.findcorr(data_frame=False)))
            print('these columns contains big strings :\n{0}\n'.format(
                cserie(self.df_len_string() > string_threshold)))
        else:
            self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                               'many_missing_percentage': manymissing_ph,
                               'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                               'low_missing_percentage': manymissing_pl,
                               'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                               'keys_detected': self.detectkey(),
                               'dup_columns': self.findupcol(threshold=100),
                               'constant_columns': self.constantcol(),
                               'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                               'high_correlated_col': self.findcorr(data_frame=False),
                               'big_strings_col': cserie(self.df_len_string() > string_threshold)
                               }
            # fix: the template had a stray literal '%' after the
            # '{low_missing_percentage:.2%}' placeholder ("5.00%%")
            self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%} manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
            """.format(**self._dict_info)
            print(self._string_info)

    def metadata(self):
        """ Return a dict/json full of infos about the dataset """
        meta = {}
        meta['mem_size'] = self.data.memory_usage(index=True).sum()  # in bytes
        meta['columns_name'] = self.data.columns.tolist()
        meta['columns_name_n'] = [e.lower() for e in self.data.columns]
        meta['nb_rows'] = self.data.shape[0]
        meta['nb_columns'] = self.data.shape[1]
        # drop dtype_p for mongodb compatibility
        structure_data = self.structure().drop(labels='dtypes_p', axis=1)
        structure_data = structure_data.to_dict('index')
        meta['structure'] = structure_data
        meta['numeric_summary'] = self.numeric_summary().to_dict('index')
        return meta
/autoc/naimputer.py
from autoc.explorer import DataExploration, pd
from autoc.utils.helpers import cserie
import seaborn as sns
import matplotlib.pyplot as plt
from autoc.utils.corrplot import plot_corrmatrix
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats.mstats import ks_2samp


def missing_map(df, nmax=100, verbose=True, yticklabels=False, figsize=(15, 11), *args, **kwargs):
    """ Returns missing map plot like in amelia 2 package in R """
    f, ax = plt.subplots(figsize=figsize)
    # fix: the original left df_s undefined (NameError) when the frame
    # already had <= nmax rows
    df_s = df.sample(n=nmax) if nmax < df.shape[0] else df
    return sns.heatmap(df_s.isnull(), yticklabels=yticklabels, vmax=1, *args, **kwargs)


class NaImputer(DataExploration):
    """ Exploration and basic imputation of missing values,
    built on top of DataExploration """

    def __init__(self, *args, **kwargs):
        super(NaImputer, self).__init__(*args, **kwargs)
        self.get_data_isna()

    @property
    def nacols(self):
        """ Returns a list of columns with at least one missing value """
        return cserie(self.nacolcount().Nanumber > 0)

    @property
    def nacols_i(self):
        """ Returns the index of columns with at least one missing value """
        # fix: the original returned the same list as `nacols`
        return cserie(self.nacolcount().Nanumber > 0, index=True)

    def get_overlapping_matrix(self, normalize=True):
        """ Look at missing values overlapping """
        arr = self.data_isna.astype('float').values
        arr = np.dot(arr.T, arr)
        if normalize:
            arr = arr / (arr.max(axis=1)[:, None])
        index = self.nacols
        res = pd.DataFrame(index=index, data=arr, columns=index)
        return res

    def infos_na(self, na_low=0.05, na_high=0.90):
        """ Returns a dict with various infos about missing values """
        infos = {}
        infos['nacolcount'] = self.nacolcount()
        infos['narowcount'] = self.narowcount()
        infos['nb_total_na'] = self.total_missing
        infos['many_na_col'] = self.manymissing(pct=na_high)
        infos['low_na_col'] = cserie(self.nacolcount().Napercentage < na_low)
        infos['total_pct_na'] = self.nacolcount().Napercentage.mean()
        return infos

    def get_isna(self, col):
        """ Returns a dummy variable indicating if an observation of a
        specific col is na or not: 0 -> not na, 1 -> na """
        return self.data.loc[:, col].isnull().astype(int)

    @property
    def data_isna_m(self):
        """ Returns merged dataframe (data, data_is_na) """
        return pd.concat((self.data, self.data_isna), axis=1)

    def get_data_isna(self, prefix="is_na_", filter_nna=True):
        """ Returns dataset with is_na columns from the a dataframe with
        missing values

        Parameters
        ----------
        prefix : str
            the name of the prefix that will be appended to the column name.
        filter_nna: bool
            True if you want remove column without missing values.
        """
        if not filter_nna:
            cols_to_keep = self.data.columns
        else:
            cols_to_keep = self.nacols
        data_isna = self.data.loc[:, cols_to_keep].isnull().astype(int)
        data_isna.columns = ["{}{}".format(prefix, c) for c in cols_to_keep]
        self.data_isna = data_isna
        return self.data_isna

    def get_corrna(self, *args, **kwargs):
        """ Get matrix of correlation of na """
        return self.data_isna.corr(*args, **kwargs)

    def corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna (deprecated name) """
        print("This function is deprecated")
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna """
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_density_m(self, colname, subset=None, prefix="is_na_", size=6, *args, **kwargs):
        """ Plot conditional density plots from all columns or subset
        based on is_na_colname 0 or 1 """
        colname_na = prefix + colname
        density_columns = self.data.columns if subset is None else subset
        # filter only numeric values and different values from is_na_col
        density_columns = [c for c in density_columns if (
            c in self._dfnum and c != colname)]
        print(density_columns)
        for col in density_columns:
            g = sns.FacetGrid(data=self.data_isna_m, col=colname_na,
                              hue=colname_na, size=size, *args, **kwargs)
            g.map(sns.distplot, col)

    def get_isna_mean(self, colname, prefix="is_na_"):
        """ Returns empirical conditional expectation, std, and sem of other
        numerical variables for a certain colname with 0:not_a_na 1:na """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        measure_var = self.data.columns.tolist()
        measure_var = [c for c in measure_var if c != colname]
        functions = ['mean', 'std', 'sem']
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname)[measure_var].agg(functions).transpose()

    def get_isna_ttest_s(self, colname_na, colname, type_test="ks"):
        """ Two-sample test of `colname` split by missingness of `colname_na`
        (t-test or Kolmogorov-Smirnov) """
        index_na = self.data.loc[:, colname_na].isnull()
        measure_var = self.data.loc[:, colname]
        # fix: the original dropped nan *before* masking, so the
        # full-length boolean index was misaligned with the shortened serie
        sample_na = measure_var[index_na].dropna()
        sample_not_na = measure_var[~index_na].dropna()
        if type_test == "ttest":
            return ttest_ind(sample_na, sample_not_na)
        elif type_test == "ks":
            return ks_2samp(sample_na, sample_not_na)

    def get_isna_ttest(self, colname_na, type_test="ks"):
        """ Run get_isna_ttest_s against every other numeric column and
        return a DataFrame of statistics and p-values """
        res = pd.DataFrame()
        col_to_compare = [c for c in self._dfnum if c !=
                          colname_na]  # remove colname_na
        for col in col_to_compare:
            ttest = self.get_isna_ttest_s(colname_na, col, type_test=type_test)
            res.loc[col, 'pvalue'] = ttest[1]
            res.loc[col, 'statistic'] = ttest[0]
            res.loc[col, 'type_test'] = type_test
        return res

    def isna_summary(self, colname, prefix="is_na_"):
        """ Returns summary from one col with describe """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname).describe().transpose()

    def delete_narows(self, pct, index=False, verbose=False):
        """ Delete rows with more na percentage than > pct in data
        Return the index

        Arguments
        ---------
        pct : float
            percentage of missing values, rows with more na percentage
            than > pct are deleted
        index : bool, default False
            True if you want an index and not a Dataframe
        verbose : bool, default False
            True if you want to see percentage of data discarded

        Returns
        --------
        - a pandas Dataframe with rows deleted if index=False, index of
        columns to delete either
        """
        # fix: the original asked manymissing for columns (axis=0) and with
        # a nonexistent `index=` kwarg; rows need axis=1 (boolean Series)
        index_missing = self.manymissing(pct=pct, axis=1)
        # fix: len(bool_series)/nrows is always 1.0; the mean of the mask
        # is the fraction of rows matching
        pct_missing = index_missing.mean()
        # fix: `verbose` was referenced but never a parameter (NameError)
        if verbose:
            print("There is {0:.2%} rows matching conditions".format(
                pct_missing))
        if not index:
            return self.data.loc[~index_missing, :]
        else:
            return index_missing

    def fillna_serie(self, colname, threshold_factor=0.1, special_value=None, date_method='ffill'):
        """ fill values in a serie default with the mean for numeric or the
        most common factor for categorical variable """
        if special_value is not None:
            # example: fill with the string "Missing"
            return self.data.loc[:, colname].fillna(special_value)
        elif self.data.loc[:, colname].dtype == float:
            # fill with median
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].median())
        elif self.is_int_factor(colname, threshold_factor):
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].mode()[0])
        # fillna for datetime with the method provided by pandas
        elif self.data.loc[:, colname].dtype == '<M8[ns]':
            return self.data.loc[:, colname].fillna(method=date_method)
        else:
            # Fill with most common value
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].value_counts().index[0])

    def basic_naimputation(self, columns_to_process=[], threshold=None):
        """ this function will return a dataframe with na value replaced in
        the columns selected by the mean or the most common value

        Arguments
        ---------
        - columns_to_process : list of columns name with na values you wish
        to fill with the fillna_serie function

        Returns
        --------
        - a pandas DataFrame with the columns_to_process filled with the
        fillna_serie
        """
        if threshold:
            columns_to_process = columns_to_process + \
                cserie(self.nacolcount().Napercentage < threshold)
        self.data.loc[:, columns_to_process] = self.data.loc[
            :, columns_to_process].apply(lambda x: self.fillna_serie(colname=x.name))
        return self.data

    def split_tt_na(self, colname, index=False):
        """ Split the dataset on missingness of colname, returning
        (test, train) frames or their boolean indexes """
        index_na = self.data.loc[:, colname].isnull()
        index_test = (index_na == True)
        index_train = (index_na == False)
        if index:
            return index_test, index_train
        else:
            return self.data.loc[index_test, :], self.data.loc[index_train, :]
/autoc/outliersdetection.py
""" @author: efourrier Purpose : This is a simple experimental class to detect outliers. This class can be used to detect missing values encoded as outlier (-999, -1, ...) """ from autoc.explorer import DataExploration, pd import numpy as np #from autoc.utils.helpers import cserie from exceptions import NotNumericColumn def iqr(ndarray, dropna=True): if dropna: ndarray = ndarray[~np.isnan(ndarray)] return np.percentile(ndarray, 75) - np.percentile(ndarray, 25) def z_score(ndarray, dropna=True): if dropna: ndarray = ndarray[~np.isnan(ndarray)] return (ndarray - np.mean(ndarray)) / (np.std(ndarray)) def iqr_score(ndarray, dropna=True): if dropna: ndarray = ndarray[~np.isnan(ndarray)] return (ndarray - np.median(ndarray)) / (iqr(ndarray)) def mad_score(ndarray, dropna=True): if dropna: ndarray = ndarray[~np.isnan(ndarray)] return (ndarray - np.median(ndarray)) / (np.median(np.absolute(ndarray - np.median(ndarray))) / 0.6745) class OutliersDetection(DataExploration): """ this class focuses on identifying outliers Parameters ---------- data : DataFrame Examples -------- * od = OutliersDetection(data = your_DataFrame) * od.structure() : global structure of your DataFrame """ def __init__(self, *args, **kwargs): super(OutliersDetection, self).__init__(*args, **kwargs) self.strong_cutoff = {'cutoff_z': 6, 'cutoff_iqr': 6, 'cutoff_mad': 6} self.basic_cutoff = {'cutoff_z': 3, 'cutoff_iqr': 2, 'cutoff_mad': 2} def check_negative_value(self, colname): """ this function will detect if there is at leat one negative value and calculate the ratio negative postive/ """ if not self.is_numeric(colname): NotNumericColumn("The serie should be numeric values") return sum(serie < 0) def outlier_detection_serie_1d(self, colname, cutoff_params, scores=[z_score, iqr_score, mad_score]): if not self.is_numeric(colname): raise("auto-clean doesn't support outliers detection for Non numeric variable") keys = [str(func.__name__) for func in scores] df = pd.DataFrame(dict((key, 
func(self.data.loc[:, colname])) for key, func in zip(keys, scores))) df['is_outlier'] = 0 for s in keys: cutoff_colname = "cutoff_{}".format(s.split('_')[0]) index_outliers = np.absolute(df[s]) >= cutoff_params[cutoff_colname] df.loc[index_outliers, 'is_outlier'] = 1 return df def check_negative_value(self): """ this will return a the ratio negative/positve for each numeric variable of the DataFrame """ return self.data[self._dfnum].apply(lambda x: self.check_negative_value_serie(x.name)) def outlier_detection_1d(self, cutoff_params, subset=None, scores=[z_score, iqr_score, mad_score]): """ Return a dictionnary with z_score,iqr_score,mad_score as keys and the associate dataframe of distance as value of the dictionnnary""" df = self.data.copy() numeric_var = self._dfnum if subset: df = df.drop(subset, axis=1) df = df.loc[:, numeric_var] # take only numeric variable # if remove_constant_col: # df = df.drop(self.constantcol(), axis = 1) # remove constant variable # df_outlier = pd.DataFrame() for col in df: df_temp = self.outlier_detection_serie_1d(col, cutoff_params, scores) df_temp.columns = [col + '_' + col_name for col_name in df_temp.columns] #df_outlier = pd.concat([df_outlier, df_temp], axis=1) return df_temp
/autoc/preprocess.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier

Purpose : The purpose of this class is to automatically transform a DataFrame
into a numpy ndarray in order to use an algorithm
"""


#########################################################
# Import modules and global helpers
#########################################################

from autoc.explorer import DataExploration, pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from autoc.exceptions import NumericError


class PreProcessor(DataExploration):
    """Preprocessing layer on top of DataExploration.

    Infers column subtypes, detects string factors and performs basic
    cleaning (dropping all-NA / constant columns, duplicated rows, ...).
    """

    # Known subtypes for _infer_subtype_col. NOTE(review): the method below
    # can also return "text_long", which is not listed here — presumably
    # 'text_raw' was meant; kept as-is to avoid breaking consumers.
    subtypes = ['text_raw', 'text_categorical', 'ordinal', 'binary', 'other']

    def __init__(self, *args, **kwargs):
        super(PreProcessor, self).__init__(*args, **kwargs)
        # Heuristic thresholds used by _infer_subtype_col.
        self.long_str_cutoff = 80       # mean string length above which text is "long"
        self.short_str_cutoff = 30      # mean string length below which text may be categorical
        self.perc_unique_cutoff = 0.2   # uniqueness ratio above which long text is free text
        self.nb_max_levels = 20         # max distinct values for a categorical/ordinal column

    def basic_cleaning(self, filter_nacols=True, drop_col=None,
                       filter_constantcol=True, filer_narows=True,
                       verbose=True, filter_rows_duplicates=True, inplace=False):
        """Basic cleaning of the data.

        Removes fully-missing columns, constant columns, fully-missing rows,
        duplicated rows and any user-specified ``drop_col``.

        Parameters
        ----------
        drop_col : str or list, optional
            Extra column name(s) to drop.
        inplace : bool
            If True, mutate ``self.data`` and return None; otherwise return
            a cleaned copy.
        """
        col_to_remove = []
        index_to_remove = []
        if filter_nacols:
            col_to_remove += self.nacols_full
        if filter_constantcol:
            col_to_remove += list(self.constantcol())
        if filer_narows:
            index_to_remove += cserie(self.narows_full)
        if filter_rows_duplicates:
            index_to_remove += cserie(self.data.duplicated())
        if isinstance(drop_col, list):
            col_to_remove += drop_col
        elif isinstance(drop_col, str):
            col_to_remove += [drop_col]
        # Deduplicate before dropping.
        col_to_remove = list(set(col_to_remove))
        index_to_remove = list(set(index_to_remove))
        if verbose:
            print("We are removing the folowing columns : {}".format(col_to_remove))
            print("We are removing the folowing rows : {}".format(index_to_remove))
        if inplace:
            # BUG FIX: previously this returned a new frame without touching
            # self.data, so inplace=True behaved like inplace=False.
            self.data = self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
            return None
        else:
            return self.data.copy().drop(index_to_remove).drop(col_to_remove, axis=1)

    def _infer_subtype_col(self, colname):
        """Try to infer the subtype of a column for scikit-learn preprocessing.

        See the class variable ``subtypes`` for the possible values.
        Returns None when no rule matches. To be completed ...
        """
        serie_col = self.data.loc[:, colname]
        if serie_col.nunique() == 2:
            return 'binary'
        elif serie_col.dtype.kind == 'O':
            # Object dtype: distinguish long free text from short categorical text.
            if serie_col.str.len().mean() > self.long_str_cutoff and \
                    serie_col.nunique() / len(serie_col) > self.perc_unique_cutoff:
                return "text_long"
            elif serie_col.str.len().mean() <= self.short_str_cutoff and \
                    serie_col.nunique() <= self.nb_max_levels:
                return 'text_categorical'
        elif self.is_numeric(colname):
            if serie_col.dtype == int and serie_col.nunique() <= self.nb_max_levels:
                return "ordinal"
        else:
            return "other"

    def infer_subtypes(self):
        """Apply _infer_subtype_col to the whole DataFrame, as a dictionary."""
        return {col: {'dtype': self.data.loc[:, col].dtype,
                      'subtype': self._infer_subtype_col(col)}
                for col in self.data.columns}

    def infer_categorical_str(self, colname, nb_max_levels=10, threshold_value=0.01):
        """Return True if the column looks like a string factor variable.

        We follow the definition of R factor variables: a character variable
        taking its values in a limited list of levels.

        Arguments
        ----------
        nb_max_levels : int
            The max number of levels you accept for a categorical variable.
        threshold_value : float
            Number of unique values allowed, as a fraction of the DataFrame
            length (the effective cap is the max of both criteria).
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels
        # False for numeric columns
        if self.is_numeric(colname):
            return False
        # False for columns already converted to categorical
        if self.data.loc[:, colname].dtype == "category":
            return False
        # Stream through the values so we can bail out early on
        # high-cardinality columns without counting everything.
        # BUG FIX: was `self.data.loc[:, colname], iteritems()` (comma
        # instead of attribute access), which raised at runtime.
        unique_value = set()
        for i, v in self.data.loc[:, colname].iteritems():
            if len(unique_value) >= max_levels:
                return False
            else:
                unique_value.add(v)
        return True

    def get_factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """Return the detected string factor variables.

        Detection is delegated to ``infer_categorical_str`` column by column.

        Arguments
        ----------
        nb_max_levels : int
            The max number of levels you accept for a categorical variable.
        threshold_value : float
            Number of unique values allowed as a fraction of the length.
        index : bool
            False returns a list of column names, True returns an Index.
        """
        # BUG FIX: previously passed the whole Series to
        # infer_categorical_str (which expects a column name) and ignored
        # the nb_max_levels / threshold_value arguments.
        res = self.data.apply(
            lambda x: self.infer_categorical_str(x.name, nb_max_levels, threshold_value))
        if index:
            return cserie(res, index=True)
        else:
            return cserie(res)

    def factors_to_categorical(self, inplace=True, verbose=True, *args, **kwargs):
        """Convert the detected factor columns to the pandas category dtype."""
        factors_col = self.get_factors(*args, **kwargs)
        if verbose:
            print("We are converting following columns to categorical :{}".format(
                factors_col))
        # BUG FIX: referenced nonexistent `self.df` and the undefined bare
        # name `category`; the dtype must be the string 'category'.
        if inplace:
            self.data.loc[:, factors_col] = self.data.loc[:, factors_col].astype('category')
        else:
            return self.data.loc[:, factors_col].astype('category')

    def remove_category(self, colname, nb_max_levels, replace_value='other', verbose=True):
        """Group the minor levels of a high-cardinality categorical column.

        Keeps the ``nb_max_levels`` most frequent levels and replaces every
        other level by ``replace_value``.
        """
        if self.data.loc[:, colname].nunique() < nb_max_levels:
            if verbose:
                print("{} has not been processed because levels < {}".format(
                    colname, nb_max_levels))
        else:
            if self.is_numeric(colname):
                # BUG FIX: .format() was called without the column name.
                raise NumericError(
                    '{} is a numeric columns you cannot use this function'.format(colname))
            # BUG FIX: value_counts is a method; it was indexed without
            # being called.
            top_levels = self.data.loc[:, colname].value_counts()[0:nb_max_levels].index
            self.data.loc[~self.data.loc[:, colname].isin(
                top_levels), colname] = replace_value
/autoc/utils/corrplot.py
import seaborn as sns import matplotlib.pyplot as plt def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True, size=None, figsize=(12, 9), *args, **kwargs): """ Plot correlation matrix of the dataset see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap """ sns.set(context="paper", font="monospace") f, ax = plt.subplots(figsize=figsize) sns.heatmap(df.corr(), vmax=1, square=square, linewidths=linewidths, annot=annot, annot_kws={"size": size}, *args, **kwargs)
/autoc/utils/getdata.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @author: efourrier Purpose : Get data from https://github.com/ericfourrier/autoc-datasets """ import pandas as pd def get_dataset(name, *args, **kwargs): """Get a dataset from the online repo https://github.com/ericfourrier/autoc-datasets (requires internet). Parameters ---------- name : str Name of the dataset 'name.csv' """ path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name) return pd.read_csv(path, *args, **kwargs)
/autoc/utils/helpers.py
# -*- coding: utf-8 -*-
"""
@author: efourrier

Purpose : Create toolbox functions to use for the different pieces of code
of the package
"""

import functools
import time

import numpy as np
import pandas as pd
from numpy.random import choice
from numpy.random import normal


def print_section(section_name, width=120):
    """Print a centered, '='-padded section header for DataExploration reports."""
    section_name = ' ' + section_name + ' '
    # BUG FIX: the format spec previously contained a stray space
    # ('{:=^{ }}'), which raises KeyError; '{}' takes width positionally.
    print('{:=^{}}'.format(section_name, width))


def flatten_list(x):
    """Recursively flatten an arbitrarily nested list into a flat list."""
    return [y for l in x for y in flatten_list(l)] if isinstance(x, list) else [x]


def cserie(serie, index=False):
    """Return the labels of a boolean Series where the value is True.

    Returns a pandas Index when ``index`` is True, a plain list otherwise.
    """
    if index:
        return serie[serie].index
    else:
        return serie[serie].index.tolist()


def removena_numpy(array):
    """Return ``array`` with NaN entries removed."""
    return array[~(np.isnan(array))]


def common_cols(df1, df2):
    """Return the intersection of common column names of two DataFrames."""
    return list(set(df1.columns) & set(df2.columns))


def bootstrap_ci(x, n=300, ci=0.95):
    """Bootstrap percentile confidence interval for the mean of a numpy array.

    Arguments
    ---------
    x : numpy ndarray
    n : int
        Number of bootstrap samples.
    ci : float
        Confidence level, in ]0, 1[.

    Return
    ------
    (ci_inf, ci_up), or (nan, nan) when x contains no non-NaN value.
    """
    low_per = 100 * (1 - ci) / 2
    high_per = 100 * ci + low_per
    x = removena_numpy(x)
    if not len(x):
        return (np.nan, np.nan)
    bootstrap_samples = choice(a=x, size=(len(x), n), replace=True).mean(axis=0)
    return np.percentile(bootstrap_samples, [low_per, high_per])


def clock(func):
    """Decorator that prints the elapsed run time (in ms) of the wrapped call.

    Used to time each test of the unittest suite, but works on any function.
    """
    def clocked(*args):
        t0 = time.time()
        result = func(*args)
        elapsed = (time.time() - t0) * 1000  # in ms
        print('elapsed : [{0:0.3f}ms]'.format(elapsed))
        return result
    return clocked


def cached_property(fun):
    """A memoize decorator for class properties (cache kept on the instance)."""
    @functools.wraps(fun)
    def get(self):
        try:
            return self._cache[fun]
        except AttributeError:
            # First access on this instance: create the cache dict.
            self._cache = {}
        except KeyError:
            pass
        ret = self._cache[fun] = fun(self)
        return ret
    return property(get)


def create_test_df():
    """Create the synthetic 1000-row pandas DataFrame used by the test suite."""
    test_df = pd.DataFrame({'id': [i for i in range(1, 1001)],
                            'member_id': [10 * i for i in range(1, 1001)]})
    test_df['na_col'] = np.nan
    test_df['id_na'] = test_df.id
    test_df.loc[1:3, 'id_na'] = np.nan
    test_df['constant_col'] = 'constant'
    test_df['constant_col_num'] = 0
    test_df['character_factor'] = [choice(list('ABCDEFG')) for _ in range(1000)]
    test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
    test_df['nearzerovar_variable'] = 'most_common_value'
    test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
    test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
    test_df['character_variable'] = [str(i) for i in range(1000)]
    test_df['duplicated_column'] = test_df.id
    test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700
    test_df['character_variable_fillna'] = ['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
    test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
    test_df['num_variable'] = 100.0
    test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
    test_df['outlier'] = normal(size=1000)
    test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
    test_df['outlier_na'] = test_df['outlier']
    test_df.loc[[300, 500], 'outlier_na'] = np.nan
    test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
    test_df['None_100'] = [1] * 900 + [None] * 100
    test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
    test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
    test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
    test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
        ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
        ['Unknown'] * 100 + ['do_not_touch'] * 200
    return test_df


def simu(pmf, size):
    """Draw samples from a discrete distribution.

    Parameters
    ----------
    pmf : tuple(ndarray, ndarray)
        A (labels, probs) tuple; labels are supposed to be in ascending order.
    size : int
        The number of samples you want to generate.

    Returns
    ------
    ndarray of samples drawn from the pmf (dtype depends on the labels).
    """
    labels, probs = pmf[0], pmf[1]
    u = np.random.rand(size)
    cumulative_sum = probs.cumsum()
    # Inverse-CDF sampling: for each u, find the first bin whose cumulative
    # probability exceeds it.
    return labels[(u >= cumulative_sum[:, None]).argmin(axis=0)]


def shuffle_df(df, reindex=False):
    """Return a shuffled copy of ``df``; reset the index when ``reindex``."""
    new_df = df.sample(frac=1) if not reindex else df.sample(
        frac=1).reset_index()
    return new_df


def random_pmf(nb_labels):
    """Return a random probability mass function over ``nb_labels`` labels."""
    random_numbers = np.random.random(nb_labels)
    return random_numbers / np.sum(random_numbers)


def random_histogram(nb_labels, nb_observations):
    """Return a random normalized histogram over ``nb_labels`` labels."""
    random_histo = np.random.choice(np.arange(0, nb_observations), nb_labels)
    return random_histo / np.sum(random_histo)


def keep_category(df, colname, pct=0.05, n=5):
    """Keep a pct or number of every level of a categorical variable.

    Parameters
    ----------
    pct : float
        Keep at least pct of the observations having a specific category.
    n : int
        Keep at least n of the rows having a specific category.

    Returns
    -------
    pandas.Index of the rows to keep.
    """
    tokeep = []
    nmin = df.groupby(colname).apply(lambda x: x.sample(
        max(1, min(x.shape[0], n, int(x.shape[0] * pct)))).index)
    for index in nmin:
        tokeep += index.tolist()
    return pd.Index(tokeep)


def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """Simulate missing values in-place in a categorical column (or columns).

    The rows set to NaN are drawn with a random (or user-supplied ``weights``)
    per-level probability; when ``safety`` is True a minimal sample of each
    level is protected via keep_category.

    Notes
    -----
    Fix issue with category variable.
    """
    if (n is None) and (pct is not None):
        # be careful here especially if cols has a lot of missing values
        n = int(pct * df.shape[0])
    # FIX: pd.core.index.Index no longer exists in modern pandas; pd.Index
    # is the stable public alias.
    if isinstance(colname, (pd.Index, list)):
        for c in colname:
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
    else:
        # FIX: tokeep was undefined when safety=False.
        tokeep = keep_category(df, colname, *args, **kwargs) if safety else pd.Index([])
        # we are not sampling from tokeep
        col = df.loc[:, colname].drop(tokeep)
        col = col.dropna()
        print(colname)
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # characters
        # generate a random pmf over the levels unless weights are given
        pmf_na = weights if weights else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw samples weighted by the per-level NA probability
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan


def get_test_df_complete():
    """Download the full Lending Club test dataset (for the demo notebook)."""
    import io
    import requests
    from zipfile import ZipFile
    zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
    r = requests.get(zip_to_download)
    # BUG FIX: the original used the Python-2-only StringIO module; the zip
    # payload is binary, so it must be wrapped in io.BytesIO.
    zipfile = ZipFile(io.BytesIO(r.content))
    file_csv = zipfile.namelist()[0]
    # we are using the c parser for speed
    df = pd.read_csv(zipfile.open(file_csv), skiprows=[0],
                     na_values=['n/a', 'N/A', ''],
                     parse_dates=['issue_d', 'last_pymnt_d',
                                  'next_pymnt_d', 'last_credit_pull_d'])
    zipfile.close()
    df = df[:-2]
    nb_row = float(len(df.index))
    df['na_col'] = np.nan
    df['constant_col'] = 'constant'
    df['duplicated_column'] = df.id
    df['many_missing_70'] = np.nan
    df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
    df['bad'] = 1
    index_good = df['loan_status'].isin(
        ['Fully Paid', 'Current', 'In Grace Period'])
    df.loc[index_good, 'bad'] = 0
    return df


def kl(p, q):
    """Kullback-Leibler divergence for discrete distributions.

    Parameters
    ----------
    p : ndarray
        probability mass function
    q : ndarray
        probability mass function

    Returns
    -------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i))
    """
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))


def kl_series(serie1, serie2, dropna=True):
    """KL divergence between the empirical distributions of two Series."""
    if dropna:
        serie1 = serie1.dropna()
        serie2 = serie2.dropna()
    return kl(serie1.value_counts(normalize=True).values,
              serie2.value_counts(normalize=True).values)


def plot_hist_na(df, colname):
    """Histogram the numeric columns of ``df`` grouped by NA-ness of colname."""
    df_h = df.copy()
    na_name = "is_na_{}".format(colname)
    df_h[na_name] = df_h[colname].isnull().astype(int)
    measure_col = cserie((df.dtypes == int) | (df.dtypes == float))
    df_h.groupby(na_name)[measure_col].hist()


def psi(bench, target, group, print_df=True):
    """Population Stability Index between two numeric samples.

    Quantifies whether the distribution is stable between two states; only
    meaningful for numeric bench and target.

    Params:
    - bench is a numpy array with the reference variable.
    - target is a numpy array of the new variable.
    - group is the number of groups you want to consider.
    """
    labels_q = np.percentile(
        bench, [(100.0 / group) * i for i in range(group + 1)],
        interpolation="nearest")
    # This is the right approach when you have not a lot of unique values
    ben_pct = (pd.cut(bench, bins=np.unique(labels_q),
                      include_lowest=True).value_counts()) / len(bench)
    target_pct = (pd.cut(target, bins=np.unique(labels_q),
                         include_lowest=True).value_counts()) / len(target)
    target_pct = target_pct.sort_index()  # sort the index
    ben_pct = ben_pct.sort_index()  # sort the index
    psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))
    # Print results for better understanding
    if print_df:
        results = pd.DataFrame({'ben_pct': ben_pct.values,
                                'target_pct': target_pct.values},
                               index=ben_pct.index)
        return {'data': results, 'statistic': psi}
    return psi
/setup.py
from setuptools import setup, find_packages def readme(): with open('README.md') as f: return f.read() setup(name='autoc', version="0.1", description='autoc is a package for data cleaning exploration and modelling in pandas', long_description=readme(), author=['Eric Fourrier'], author_email='ericfourrier0@gmail.com', license='MIT', url='https://github.com/ericfourrier/auto-cl', packages=find_packages(), test_suite='test', keywords=['cleaning', 'preprocessing', 'pandas'], install_requires=[ 'numpy>=1.7.0', 'pandas>=0.15.0', 'seaborn>=0.5', 'scipy>=0.14'] )
/test.py
# -*- coding: utf-8 -*- """ @author: efourrier Purpose : Automated test suites with unittest run "python -m unittest -v test" in the module directory to run the tests The clock decorator in utils will measure the run time of the test """ ######################################################### # Import Packages and helpers ######################################################### import unittest # internal helpers # from autoc.utils.helpers import clock, create_test_df, removena_numpy, cserie from autoc.utils.helpers import random_pmf, clock, create_test_df, cserie, simu, removena_numpy from autoc.utils.getdata import get_dataset from autoc.explorer import DataExploration from autoc.naimputer import NaImputer from autoc.outliersdetection import OutliersDetection import pandas as pd import numpy as np flatten_list = lambda x: [y for l in x for y in flatten_list( l)] if isinstance(x, list) else [x] # flatten_list = lambda x: [y for l in x for y in flatten_list(l)] if isinstance(x,list) else [x] ######################################################### # Writing the tests ######################################################### class TestDataExploration(unittest.TestCase): @classmethod def setUpClass(cls): """ creating test data set for the test module """ cls._test_df = create_test_df() cls._test_dc = DataExploration(data=cls._test_df) @clock def test_to_lowercase(self): df_lower = self._test_dc.to_lowercase() self.assertNotEqual(id(df_lower), id(self._test_dc.data)) self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['c'] * 300)== df_lower.loc[:, 'character_variable_up1']).all()) self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['d'] * 300)== df_lower.loc[:, 'character_variable_up2']).all()) @clock def test_copy(self): exploration_copy = DataExploration(data=create_test_df(), copy=True) self.assertEqual(id(self._test_df), id(self._test_dc.data)) self.assertNotEqual(id(self._test_df), id(exploration_copy.data)) @clock def test_cserie(self): char_var = 
cserie(self._test_dc.data.dtypes == "object") self.assertIsInstance(char_var, list) self.assertIn('character_variable', char_var) @clock def test_removena_numpy(self): test_array = np.array([np.nan, 1, 2, np.nan]) self.assertTrue((removena_numpy(test_array) == np.array([1, 2])).all()) @clock def test_sample_df(self): self.assertEqual(len(self._test_dc.sample_df(pct=0.061)), 0.061 * float(self._test_dc.data.shape[0])) @clock def test_nrow(self): self.assertEqual(self._test_dc._nrow, self._test_dc.data.shape[0]) @clock def test_col(self): self.assertEqual(self._test_dc._ncol, self._test_dc.data.shape[1]) @clock def test_is_numeric(self): self.assertTrue(self._test_dc.is_numeric("num_variable")) self.assertTrue(self._test_dc.is_numeric("many_missing_70")) self.assertFalse(self._test_dc.is_numeric("character_variable")) @clock def test_is_int_factor(self): self.assertFalse(self._test_dc.is_int_factor("num_variable")) self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.01)) self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.1)) self.assertFalse(self._test_dc.is_int_factor("int_factor_10", 0.005)) self.assertFalse(self._test_dc.is_int_factor("character_variable")) @clock def test_where_numeric(self): self.assertEqual(cserie(self._test_dc.where_numeric().all()), self._test_dc._dfnum) @clock def test_total_missing(self): self.assertEqual(self._test_dc.total_missing, self._test_dc.data.isnull().sum().sum()) @clock def test_None_count(self): nacolcount = self._test_dc.nacolcount() self.assertEqual(nacolcount.loc['None_100', 'Napercentage'], 0.1) self.assertEqual(nacolcount.loc['None_100', 'Nanumber'], 100) self.assertEqual(nacolcount.loc['None_na_200', 'Napercentage'], 0.2) self.assertEqual(nacolcount.loc['None_na_200', 'Nanumber'], 200) @clock def test_nacolcount_capture_na(self): nacolcount = self._test_dc.nacolcount() self.assertEqual(nacolcount.loc['na_col', 'Napercentage'], 1.0) self.assertEqual(nacolcount.loc['many_missing_70', 'Napercentage'], 
0.7) @clock def test_nacolcount_is_type_dataframe(self): self.assertIsInstance(self._test_dc.nacolcount(), pd.core.frame.DataFrame) @clock def test_narowcount_capture_na(self): narowcount = self._test_dc.narowcount() self.assertEqual(sum(narowcount['Nanumber'] > 0), self._test_dc._nrow) # # @clock # def test_detect_other_na(self): # other_na = self._test_dc.detect_other_na() # self.assertIsInstance(narowcount, pd.core.frame.DataFrame) @clock def test_narowcount_is_type_dataframe(self): narowcount = self._test_dc.narowcount() self.assertIsInstance(narowcount, pd.core.frame.DataFrame) @clock def test_manymissing_capture(self): manymissing = self._test_dc.manymissing(0.7) self.assertIsInstance(manymissing, list) self.assertIn('many_missing_70', manymissing) self.assertIn('na_col', manymissing) @clock def test_nacols_full(self): nacols_full = self._test_dc.nacols_full self.assertIsInstance(nacols_full, list) self.assertIn('na_col',nacols_full ) @clock def test_narows_full(self): test_df = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD')) test_df.loc[99, :] = np.nan self.assertIn(99, DataExploration(test_df).narows_full) self.assertNotIn(1, test_df) @clock def test_constant_col_capture(self): constantcol = self._test_dc.constantcol() self.assertIsInstance(constantcol, list) self.assertIn('constant_col', constantcol) self.assertIn('constant_col_num', constantcol) self.assertIn('na_col', constantcol) @clock def test_count_unique(self): count_unique = self._test_dc.count_unique() self.assertIsInstance(count_unique, pd.Series) self.assertEqual(count_unique.id, 1000) self.assertEqual(count_unique.constant_col, 1) self.assertEqual(count_unique.character_factor, 7) @clock def test_dfchar_check_col(self): dfchar = self._test_dc._dfchar self.assertIsInstance(dfchar, list) self.assertNotIn('num_variable', dfchar) self.assertIn('character_factor', dfchar) self.assertIn('character_variable', dfchar) self.assertNotIn('many_missing_70', dfchar) @clock def 
test_dfnum_check_col(self): dfnum = self._test_dc._dfnum self.assertIsInstance(dfnum, list) self.assertIn('num_variable', dfnum) self.assertNotIn('character_factor', dfnum) self.assertNotIn('character_variable', dfnum) self.assertIn('many_missing_70', dfnum) @clock def test_factors_check_col(self): factors = self._test_dc.factors() self.assertIsInstance(factors, list) self.assertNotIn('num_factor', factors) self.assertNotIn('character_variable', factors) self.assertIn('character_factor', factors) @clock def test_detectkey_check_col(self): detectkey = self._test_dc.detectkey() self.assertIsInstance(detectkey, list) self.assertIn('id', detectkey) self.assertIn('member_id', detectkey) @clock def test_detectkey_check_col_dropna(self): detectkeyna = self._test_dc.detectkey(dropna=True) self.assertIn('id_na', detectkeyna) self.assertIn('id', detectkeyna) self.assertIn('member_id', detectkeyna) @clock def test_findupcol_check(self): findupcol = self._test_dc.findupcol() self.assertIn(['id', 'duplicated_column'], findupcol) self.assertNotIn('member_id', flatten_list(findupcol)) @clock def test_count_unique(self): count_unique = self._test_dc.count_unique() self.assertIsInstance(count_unique, pd.Series) self.assertEqual(count_unique.id, len(self._test_dc.data.id)) self.assertEqual(count_unique.constant_col, 1) self.assertEqual(count_unique.num_factor, len( pd.unique(self._test_dc.data.num_factor))) @clock def test_structure(self): structure = self._test_dc.structure() self.assertIsInstance(structure, pd.DataFrame) self.assertEqual(len(self._test_dc.data), structure.loc['na_col', 'nb_missing']) self.assertEqual(len(self._test_dc.data), structure.loc[ 'id', 'nb_unique_values']) self.assertTrue(structure.loc['id', 'is_key']) @clock def test_nearzerovar(self): nearzerovar = self._test_dc.nearzerovar(save_metrics=True) self.assertIsInstance(nearzerovar, pd.DataFrame) self.assertIn('nearzerovar_variable', cserie(nearzerovar.nzv)) self.assertIn('constant_col', 
cserie(nearzerovar.nzv)) self.assertIn('na_col', cserie(nearzerovar.nzv)) class TestNaImputer(unittest.TestCase): @classmethod def setUpClass(cls): """ creating test data set for the test module """ cls._test_na = NaImputer(data=create_test_df()) @clock def test_fillna_serie(self): test_serie = pd.Series([1, 3, np.nan, 5]) self.assertIsInstance( self._test_na.fillna_serie(test_serie), pd.Series) self.assertEqual(self._test_na.fillna_serie(test_serie)[2], 3.0) @clock def test_fillna_serie(self): test_char_variable = self._test_na.fillna_serie('character_variable_fillna') test_num_variable = self._test_na.fillna_serie('numeric_variable_fillna') self.assertTrue(test_char_variable.notnull().any()) self.assertTrue(test_num_variable.notnull().any()) self.assertTrue((pd.Series( ['A'] * 300 + ['B'] * 200 + ['C'] * 200 + ['A'] * 300) == test_char_variable).all()) self.assertTrue( (pd.Series([1] * 400 + [3] * 400 + [2] * 200) == test_num_variable).all()) @clock def test_fill_low_na(self): df_fill_low_na = self._test_na.basic_naimputation(columns_to_process=['character_variable_fillna', 'numeric_variable_fillna']) df_fill_low_na_threshold = self._test_na.basic_naimputation(threshold=0.4) self.assertIsInstance(df_fill_low_na, pd.DataFrame) self.assertIsInstance(df_fill_low_na_threshold, pd.DataFrame) self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [ 'A'] * 300) == df_fill_low_na.character_variable_fillna).all()) self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200) == df_fill_low_na.numeric_variable_fillna).all()) self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [ 'A'] * 300) == df_fill_low_na_threshold.character_variable_fillna).all()) self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200) == df_fill_low_na_threshold.numeric_variable_fillna).all()) self.assertTrue( sum(pd.isnull(df_fill_low_na_threshold.many_missing_70)) == 700) class TestOutliersDetection(unittest.TestCase): @classmethod def setUpClass(cls): """ 
creating test data set for the test module """ cls.data = create_test_df() cls.outlier_d = OutliersDetection(cls.data) @clock def test_outlier_detection_serie_1d(self): strong_cutoff = self.outlier_d.strong_cutoff df_outliers = self.outlier_d.outlier_detection_serie_1d('outlier', strong_cutoff) self.assertIn(1, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) self.assertNotIn(10, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) self.assertIn(100, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) self.assertNotIn(2, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) @clock def test_outlier_detection_serie_1d_with_na(self): strong_cutoff = self.outlier_d.strong_cutoff df_outliers = self.outlier_d.outlier_detection_serie_1d('outlier_na', strong_cutoff) self.assertIn(1, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) self.assertNotIn(10, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) self.assertIn(100, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) self.assertNotIn(2, cserie(df_outliers.loc[:, 'is_outlier'] == 1)) class TestHelper(unittest.TestCase): @classmethod def setUpClass(cls): """ creating test data set for the test module """ cls.data = create_test_df() @clock def test_random_pmf(self): self.assertAlmostEqual(len(random_pmf(10)), 10) self.assertAlmostEqual(random_pmf(10).sum(), 1) @clock def test_simu(self): pmf = random_pmf(4) samples_unique = simu((np.array(['A', 'B']), np.array([0, 1])), 10) self.assertTrue((samples_unique == 'B').all()) # class TestGetData(unittest.TestCase): # # @clock # def test_getdata_titanic(self): # """ Test if downloading titanic data is working """ # titanic = get_dataset('titanic') # self.assertIsInstance(titanic, pd.DataFrame) # self.assertEqual(titanic.shape[0], 891) # self.assertEqual(titanic.shape[1], 15) # Adding new tests sets # def suite(): # suite = unittest.TestSuite() # suite.addTest(TestPandasPatch('test_default_size')) # return suite # Other solution than calling main #suite = 
unittest.TestLoader().loadTestsFromTestCase(TestPandasPatch) #unittest.TextTestRunner(verbosity = 1 ).run(suite) if __name__ == "__main__": unittest.main(exit=False)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
thinkAmi-sandbox/AWS_CDK-sample
refs/heads/master
{"/step_functions/app.py": ["/step_functions/step_functions/step_functions_stack.py"]}
└── └── step_functions ├── app.py └── step_functions ├── lambda_function │ ├── error │ │ └── lambda_function.py │ ├── first │ │ └── lambda_function.py │ ├── second │ │ └── lambda_function.py │ └── third │ └── lambda_function.py ├── settings.example.py └── step_functions_stack.py
/step_functions/app.py
#!/usr/bin/env python3 from aws_cdk import core from step_functions.step_functions_stack import StepFunctionsStack app = core.App() # CFnのStack名を第2引数で渡す StepFunctionsStack(app, 'step-functions') app.synth()
/step_functions/step_functions/lambda_function/error/lambda_function.py
import json def lambda_handler(event, context): # { # "resource": "arn:aws:lambda:region:id:function:sfn_error_lambda", # "input": { # "Error": "Exception", # "Cause": "{\"errorMessage\": \"\\u5076\\u6570\\u3067\\u3059\", # \"errorType\": \"Exception\", # \"stackTrace\": [\" File \\\"/var/task/lambda_function.py\\\", line 5, # in lambda_handler\\n raise Exception('\\u5076\\u6570\\u3067\\u3059') # \\n\"]}" # }, # "timeoutInSeconds": null # } return { # JSONをPythonオブジェクト化することで、文字化けを直す 'error_message': json.loads(event['Cause']), }
/step_functions/step_functions/lambda_function/first/lambda_function.py
import os import boto3 from numpy.random import rand def lambda_handler(event, context): body = f'{event["message"]} \n value: {rand()}' client = boto3.client('s3') client.put_object( Bucket=os.environ['BUCKET_NAME'], Key='sfn_first.txt', Body=body, ) return { 'body': body, 'message': event['message'], }
/step_functions/step_functions/lambda_function/second/lambda_function.py
def lambda_handler(event, context): if event['parallel_no'] % 2 == 0: raise Exception('偶数です') return { 'message': event['message'], 'const_value': event['const_value'] }
/step_functions/step_functions/lambda_function/third/lambda_function.py
def lambda_handler(event, context): if event['parallel_no'] == 1: raise Exception('強制的にエラーとします') return 'only 3rd message.'
/step_functions/step_functions/settings.example.py
AWS_SCIPY_ARN = 'arn:aws:lambda:region:account_id:layer:AWSLambda-Python37-SciPy1x:2'
/step_functions/step_functions/step_functions_stack.py
import pathlib

from aws_cdk import core
from aws_cdk.aws_iam import PolicyStatement, Effect, ManagedPolicy, ServicePrincipal, Role
from aws_cdk.aws_lambda import AssetCode, LayerVersion, Function, Runtime
from aws_cdk.aws_s3 import Bucket
from aws_cdk.aws_stepfunctions import Task, StateMachine, Parallel
from aws_cdk.aws_stepfunctions_tasks import InvokeFunction, StartExecution

# NOTE(review): only settings.example.py is checked in — copy it to
# settings.py (with a real layer ARN) before deploying.
from settings import AWS_SCIPY_ARN


class StepFunctionsStack(core.Stack):
    """Provisions four Lambdas, an S3 bucket, and a main + sub state machine.

    The main state machine runs ``first_lambda`` and then fans out to three
    parallel executions of the sub state machine (second -> third task,
    each with a catch routed to the error task).
    """

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Base directory holding one sub-directory per Lambda function.
        self.lambda_path_base = pathlib.Path(__file__).parents[0].joinpath('lambda_function')
        self.bucket = self.create_s3_bucket()
        self.managed_policy = self.create_managed_policy()
        self.role = self.create_role()
        self.first_lambda = self.create_first_lambda()
        self.second_lambda = self.create_other_lambda('second')
        self.third_lambda = self.create_other_lambda('third')
        self.error_lambda = self.create_other_lambda('error')
        # The sub state machine must exist before the main one references it.
        self.sub_state_machine = self.create_sub_state_machine()
        self.main_state_machine = self.create_main_state_machine()

    def create_s3_bucket(self):
        """Bucket that first_lambda writes its output file into."""
        return Bucket(
            self,
            'S3 Bucket',
            # NOTE(review): f-string has no placeholders; a plain string would do.
            bucket_name=f'sfn-bucket-by-aws-cdk',
        )

    def create_managed_policy(self):
        """Policy allowing s3:PutObject into this stack's bucket only."""
        statement = PolicyStatement(
            effect=Effect.ALLOW,
            actions=[
                "s3:PutObject",
            ],
            resources=[
                f'{self.bucket.bucket_arn}/*',
            ]
        )
        return ManagedPolicy(
            self,
            'Managed Policy',
            managed_policy_name='sfn_lambda_policy',
            statements=[statement],
        )

    def create_role(self):
        """Execution role for first_lambda, carrying the PutObject policy."""
        service_principal = ServicePrincipal('lambda.amazonaws.com')
        return Role(
            self,
            'Role',
            assumed_by=service_principal,
            role_name='sfn_lambda_role',
            managed_policies=[self.managed_policy],
        )

    def create_first_lambda(self):
        """Lambda with the SciPy/NumPy layer, S3 write role and bucket env var."""
        function_path = str(self.lambda_path_base.joinpath('first'))
        code = AssetCode(function_path)
        scipy_layer = LayerVersion.from_layer_version_arn(
            self, f'sfn_scipy_layer_for_first', AWS_SCIPY_ARN)
        return Function(
            self, f'id_first',
            # Directory that contains the Lambda source code.
            code=code,
            # Handler entry point inside that directory.
            handler='lambda_function.lambda_handler',
            # Runtime version.
            runtime=Runtime.PYTHON_3_7,
            # Environment variables.
            environment={'BUCKET_NAME': self.bucket.bucket_name},
            function_name='sfn_first_lambda',
            layers=[scipy_layer],
            memory_size=128,
            role=self.role,
            timeout=core.Duration.seconds(10),
        )

    def create_other_lambda(self, function_name):
        """Plain Lambda (no layer, default role) from lambda_function/<name>/."""
        function_path = str(self.lambda_path_base.joinpath(function_name))
        return Function(
            self, f'id_{function_name}',
            code=AssetCode(function_path),
            handler='lambda_function.lambda_handler',
            runtime=Runtime.PYTHON_3_7,
            function_name=f'sfn_{function_name}_lambda',
            memory_size=128,
            timeout=core.Duration.seconds(10),
        )

    def create_sub_state_machine(self):
        """Second -> third task chain; both tasks catch into the error task."""
        error_task = Task(
            self, 'Error Task',
            task=InvokeFunction(self.error_lambda),
        )
        # Second Task.
        second_task = Task(
            self, 'Second Task',
            task=InvokeFunction(self.second_lambda),
            # Narrow the incoming state to these keys before invoking the Lambda.
            input_path="$['first_result', 'parallel_no', 'message', 'context_name', 'const_value']",
            # Store the Lambda result under `second_result`.
            result_path='$.second_result',
            # Forward only these keys to the next state.
            output_path="$['second_result', 'parallel_no']"
        )
        # Add error handling.
        second_task.add_catch(error_task, errors=['States.ALL'])
        # Third Task.
        third_task = Task(
            self, 'Third Task',
            task=InvokeFunction(self.third_lambda),
            # Replace the whole state with third_lambda's result.
            result_path='$',
        )
        # Same error handling here.
        third_task.add_catch(error_task, errors=['States.ALL'])
        # Run the third task right after the second.
        definition = second_task.next(third_task)
        return StateMachine(
            self, 'Sub StateMachine',
            definition=definition,
            state_machine_name='sfn_sub_state_machine',
        )

    def create_main_state_machine(self):
        """first_task followed by the parallel fan-out of sub-machine runs."""
        first_task = Task(
            self, 'S3 Lambda Task',
            task=InvokeFunction(self.first_lambda, payload={'message': 'Hello world'}),
            comment='Main StateMachine',
        )
        parallel_task = self.create_parallel_task()
        # After the first task, run the parallel branches.
        definition = first_task.next(parallel_task)
        return StateMachine(
            self, 'Main StateMachine',
            definition=definition,
            state_machine_name='sfn_main_state_machine',
        )

    def create_parallel_task(self):
        """Three branches, each starting the sub state machine with its index."""
        parallel_task = Parallel(
            self, 'Parallel Task',
        )
        for i in range(1, 4):
            sub_task = StartExecution(
                self.sub_state_machine,
                input={
                    'parallel_no': i,
                    'first_result.$': '$',
                    # message from first_task's response.
                    'message.$': '$.message',
                    # State name from the context object.
                    'context_name.$': '$$.State.Name',
                    # Two fixed values (ignore_value is dropped by the sub
                    # machine's input_path filter).
                    'const_value': 'ham',
                    'ignore_value': 'ignore',
                },
            )
            invoke_sub_task = Task(
                self, f'Sub Task {i}',
                task=sub_task,
            )
            parallel_task.branch(invoke_sub_task)
        return parallel_task
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
greenmato/slackline-spots
refs/heads/master
{"/spots-api/map/urls.py": ["/spots-api/map/api.py", "/spots-api/map/views.py"], "/spots-api/map/api.py": ["/spots-api/map/forms.py", "/spots-api/map/models.py"], "/spots-api/map/forms.py": ["/spots-api/map/models.py"]}
└── └── spots-api └── map ├── api.py ├── forms.py ├── migrations │ ├── 0001_initial.py │ ├── 0005_auto_20180305_2131.py │ ├── 0007_auto_20180305_2139.py │ ├── 0008_auto_20180305_2211.py │ ├── 0009_auto_20180305_2215.py │ └── 0010_auto_20180306_2119.py ├── models.py ├── urls.py └── views.py
/spots-api/map/api.py
from abc import ABC, ABCMeta, abstractmethod
from django.forms.models import model_to_dict
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator

from map.models import Spot
from map.models import Vote
from map.forms import SpotForm, VoteForm


class BaseApi(View):
    """Shared JSON envelope helpers for all API views.

    NOTE(review): the original classes set ``__metaclass__ = ABCMeta``,
    which is Python-2 syntax and a no-op under Python 3; those lines were
    removed (behavior unchanged).
    """

    def _response(self, body):
        """200 response wrapping *body* as ``{"data": body}``."""
        response = {'data': body}
        return JsonResponse(response)

    def _error_response(self, status, error):
        """Error response ``{"error": error}`` with the given HTTP status."""
        response = {'error': error}
        return JsonResponse(response, status=status)


class BaseSpotsApi(BaseApi):
    """Adds Spot serialization shared by the spot endpoints."""

    def _spot_to_dict(self, spot):
        """Serialize *spot* to a dict, adding its computed vote score."""
        spot_dict = model_to_dict(spot)
        spot_dict['score'] = spot.get_score()
        return spot_dict


# @method_decorator(csrf_exempt, name='dispatch')
class SpotsApi(BaseSpotsApi):
    """Collection endpoint: list all spots / create a new spot."""

    def get(self, request):
        # TODO: only retrieve nearest spots and make them dynamically load as the map moves
        nearby_spots = Spot.objects.all()
        nearby_spots = list(map(self._spot_to_dict, nearby_spots))
        return self._response(nearby_spots)

    def post(self, request):
        form = SpotForm(request.POST)
        if form.is_valid():
            new_spot = Spot(
                name=request.POST['name'],
                description=request.POST['description'],
                latitude=request.POST['latitude'],
                longitude=request.POST['longitude']
            )
            new_spot.save()
            return self._response(self._spot_to_dict(new_spot))
        return self._error_response(422, 'Invalid input.')


class SpotApi(BaseSpotsApi):
    """Single-spot read endpoint."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        return self._response(self._spot_to_dict(spot))


# @method_decorator(csrf_exempt, name='dispatch')
class RatingsApi(BaseApi):
    """Ratings endpoints — not implemented yet.

    Fixed: the original ``get`` referenced the undefined names ``Rating``
    (never imported here) and ``rating_type``, raising ``NameError`` at
    request time, and both handlers then fell through to ``return None``
    (itself an error inside Django). Both now answer 501 explicitly
    until the feature is implemented.
    """

    def get(self, request, spot_id):
        # 404 for unknown spots, as the other endpoints do.
        get_object_or_404(Spot, pk=spot_id)
        # TODO: return per-type rating averages, e.g. spot.get_ratings_dict().
        return self._error_response(501, 'Not implemented.')

    def post(self, request, spot_id):
        get_object_or_404(Spot, pk=spot_id)
        # TODO: validate a RatingForm and attach a Rating to this spot.
        return self._error_response(501, 'Not implemented.')


# @method_decorator(csrf_exempt, name='dispatch')
class VotesApi(BaseApi):
    """Voting endpoints: read a spot's net score, cast a vote."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        return self._response(spot.get_score())

    def post(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        form = VoteForm(request.POST)
        if form.is_valid():
            # NOTE(review): request.POST values are strings, so any non-empty
            # 'positive' is truthy — confirm the client sends a value the
            # model's BooleanField coerces as intended.
            new_vote = Vote(spot=spot, positive=request.POST['positive'])
            new_vote.save()
            return self._response(model_to_dict(new_vote))
        return self._error_response(422, 'Invalid input.')
/spots-api/map/forms.py
from django import forms from django.forms import ModelForm, Textarea from map.models import Spot, Rating, Vote class SpotForm(ModelForm): class Meta: model = Spot fields = ['name', 'description', 'latitude', 'longitude'] widgets = { 'latitude': forms.HiddenInput(), 'longitude': forms.HiddenInput(), } class RatingForm(ModelForm): class Meta: model = Rating fields = ['spot', 'rating_type', 'score'] widgets = { 'spot': forms.HiddenInput(), 'rating_type': forms.HiddenInput(), } class VoteForm(ModelForm): class Meta: model = Vote fields = ['positive'] widgets = { 'positive': forms.HiddenInput(), }
/spots-api/map/migrations/0001_initial.py
# Generated by Django 2.0 on 2017-12-17 18:04 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Spot', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.CharField(max_length=500)), ('latitude', models.DecimalField(decimal_places=6, max_digits=9)), ('longitude', models.DecimalField(decimal_places=6, max_digits=9)), ], ), ]
/spots-api/map/migrations/0005_auto_20180305_2131.py
# Generated by Django 2.0.1 on 2018-03-05 21:31 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('map', '0004_ratingtype'), ] operations = [ migrations.AlterField( model_name='spot', name='latitude', field=models.DecimalField(decimal_places=7, max_digits=10), ), migrations.AlterField( model_name='spot', name='longitude', field=models.DecimalField(decimal_places=7, max_digits=10), ), ]
/spots-api/map/migrations/0007_auto_20180305_2139.py
# Generated by Django 2.0.1 on 2018-03-05 21:39 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('map', '0006_rating'), ] operations = [ migrations.RenameField( model_name='rating', old_name='rating_type_id', new_name='rating_type', ), migrations.RenameField( model_name='rating', old_name='spot_id', new_name='spot', ), ]
/spots-api/map/migrations/0008_auto_20180305_2211.py
# Generated by Django 2.0.1 on 2018-03-05 22:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('map', '0007_auto_20180305_2139'), ] operations = [ migrations.RenameField( model_name='rating', old_name='rating_type', new_name='rating', ), migrations.AddField( model_name='rating', name='score', field=models.IntegerField(default=0), ), ]
/spots-api/map/migrations/0009_auto_20180305_2215.py
# Generated by Django 2.0.1 on 2018-03-05 22:15 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('map', '0008_auto_20180305_2211'), ] operations = [ migrations.CreateModel( name='Vote', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('positive', models.BooleanField()), ('spot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map.Spot')), ], ), migrations.AlterField( model_name='rating', name='score', field=models.IntegerField(), ), ]
/spots-api/map/migrations/0010_auto_20180306_2119.py
# Generated by Django 2.0.1 on 2018-03-06 21:19 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('map', '0009_auto_20180305_2215'), ] operations = [ migrations.RenameField( model_name='rating', old_name='rating', new_name='rating_type', ), ]
/spots-api/map/models.py
from django.db import models from django.core.validators import MaxValueValidator, MinValueValidator class Spot(models.Model): name = models.CharField(max_length=50) description = models.CharField(max_length=500) latitude = models.DecimalField(max_digits=10, decimal_places=7) longitude = models.DecimalField(max_digits=10, decimal_places=7) created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) def __str__(self): spot = "Spot %s - %s: %s" % (self.id, self.name, self.description) return spot def get_score(self): votes = Vote.objects.filter(spot=self.id) score = 0 for vote in votes: score += 1 if vote.positive else -1 return score def get_ratings_dict(self): ratings = Rating.objects.filter(spot=self.id) ratings_dict = {} for rating in ratings: if rating.rating_type.name in ratings_dict: ratings_dict[rating.rating_type.name] += rating.score else: ratings_dict[rating.rating_type.name] = rating.score for rating_type, score in ratings_dict.items(): ratings_dict[rating_type] = round((score / ratings.count()), 2) return ratings_dict class RatingType(models.Model): name = models.CharField(max_length=50) def __str__(self): rating_type = self.name return rating_type class Rating(models.Model): spot = models.ForeignKey(Spot, on_delete=models.CASCADE) rating_type = models.ForeignKey(RatingType, on_delete=models.CASCADE) score = models.IntegerField( validators=[ MaxValueValidator(10), MinValueValidator(1) ] ) class Vote(models.Model): spot = models.ForeignKey(Spot, on_delete=models.CASCADE) positive = models.BooleanField()
/spots-api/map/urls.py
from django.urls import path from django.conf import settings from django.conf.urls.static import static from map.views import MapView from map.api import SpotsApi, SpotApi, RatingsApi, VotesApi app_name = 'map' urlpatterns = [ path('', MapView.as_view(), name='index'), path('spots/', SpotsApi.as_view()), path('spots/<int:spot_id>/', SpotApi.as_view()), path('spots/<int:spot_id>/ratings/', RatingsApi.as_view()), path('spots/<int:spot_id>/votes/', VotesApi.as_view()), ] if settings.DEBUG is True: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
/spots-api/map/views.py
from django.shortcuts import render from django.views import View class MapView(View): def get(self, request): return render(request, 'map/index.html')
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
katrii/ohsiha
refs/heads/master
{"/ohjelma/views.py": ["/ohjelma/models.py"]}
└── └── ohjelma ├── apps.py ├── migrations │ ├── 0002_song.py │ ├── 0003_song_release_year.py │ ├── 0004_track.py │ ├── 0005_auto_20200329_1313.py │ ├── 0006_auto_20200329_1329.py │ ├── 0007_track_track_id.py │ └── 0009_auto_20200411_2211.py ├── models.py ├── urls.py └── views.py
/ohjelma/apps.py
from django.apps import AppConfig class OhjelmaConfig(AppConfig): name = 'ohjelma'
/ohjelma/migrations/0002_song.py
# Generated by Django 3.0.2 on 2020-03-13 17:36 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0001_initial'), ] operations = [ migrations.CreateModel( name='Song', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('song_name', models.CharField(max_length=200)), ('song_artist', models.CharField(max_length=200)), ], ), ]
/ohjelma/migrations/0003_song_release_year.py
# Generated by Django 3.0.2 on 2020-03-15 16:01 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0002_song'), ] operations = [ migrations.AddField( model_name='song', name='release_year', field=models.IntegerField(default=2000), ), ]
/ohjelma/migrations/0004_track.py
# Generated by Django 3.0.2 on 2020-03-28 23:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0003_song_release_year'), ] operations = [ migrations.CreateModel( name='Track', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('track_name', models.CharField(max_length=500)), ('track_artist', models.CharField(max_length=500)), ('track_duration', models.IntegerField(default=200000)), ('track_popularity', models.IntegerField(default=100)), ], ), ]
/ohjelma/migrations/0005_auto_20200329_1313.py
# Generated by Django 3.0.2 on 2020-03-29 10:13 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0004_track'), ] operations = [ migrations.AlterField( model_name='track', name='track_duration', field=models.CharField(max_length=5), ), ]
/ohjelma/migrations/0006_auto_20200329_1329.py
# Generated by Django 3.0.2 on 2020-03-29 10:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0005_auto_20200329_1313'), ] operations = [ migrations.AlterField( model_name='track', name='track_duration', field=models.CharField(max_length=10), ), ]
/ohjelma/migrations/0007_track_track_id.py
# Generated by Django 3.0.2 on 2020-04-11 18:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0006_auto_20200329_1329'), ] operations = [ migrations.AddField( model_name='track', name='track_id', field=models.CharField(default=0, max_length=30), preserve_default=False, ), ]
/ohjelma/migrations/0009_auto_20200411_2211.py
# Generated by Django 3.0.2 on 2020-04-11 19:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ohjelma', '0008_track_track_danceability'), ] operations = [ migrations.AddField( model_name='track', name='track_acousticness', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_energy', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_instrumentalness', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_key', field=models.IntegerField(default=0, max_length=3), preserve_default=False, ), migrations.AddField( model_name='track', name='track_liveness', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_loudness', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_speechiness', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_tempo', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='track', name='track_valence', field=models.FloatField(default=0, max_length=10), preserve_default=False, ), ]
/ohjelma/models.py
from django.db import models from django.urls import reverse class Question(models.Model): question_text = models.CharField(max_length=200) pub_date = models.DateTimeField('Date published') class Choice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) class Song(models.Model): song_name = models.CharField(max_length=200) song_artist = models.CharField(max_length = 200) release_year = models.IntegerField(default=2000) def __str__(self): return self.song_name def get_absolute_url(self): return reverse('song_edit', kwargs={'pk': self.pk}) class Track(models.Model): track_id = models.CharField(max_length=30) track_name = models.CharField(max_length=500) track_artist = models.CharField(max_length = 500) track_duration = models.CharField(max_length = 10) track_popularity = models.IntegerField(default=100) track_danceability = models.FloatField(max_length=10) track_energy = models.FloatField(max_length=10) track_key = models.IntegerField(max_length=3) track_loudness = models.FloatField(max_length=10) track_speechiness = models.FloatField(max_length=10) track_acousticness = models.FloatField(max_length=10) track_instrumentalness = models.FloatField(max_length=10) track_liveness = models.FloatField(max_length=10) track_valence = models.FloatField(max_length=10) track_tempo = models.FloatField(max_length=10) def __str__(self): return self.track_name
/ohjelma/urls.py
from django.urls import path from . import views urlpatterns = [ path('', views.index, name = 'home'), path('songs/', views.SongList.as_view(), name = 'song_list'), path('view/<int:pk>', views.SongView.as_view(), name = 'song_view'), path('new', views.SongCreate.as_view(), name = 'song_new'), path('view/<int:pk>', views.SongView.as_view(), name = 'song_view'), path('edit/<int:pk>', views.SongUpdate.as_view(), name = 'song_edit'), path('delete/<int:pk>', views.SongDelete.as_view(), name = 'song_delete'), path('tracks/', views.TrackView, name = 'track_list'), path('yearanalysis/', views.YearAnalysis, name = 'year_analysis'), path('analysis/<int:pk>', views.Analysis.as_view(), name = 'track_detail'), #url(r'^tracks/(?P<tracksyear>\w+)/$', views.TrackView, name = "TrackView") path('tracks/<int:tracksyear>', views.TrackView, name = "TrackView") ]
/ohjelma/views.py
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from ohjelma.models import Song
from ohjelma.models import Track

import json
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials


def index(request):
    """Plain-text landing page."""
    return HttpResponse('Welcome.')


class SongList(ListView):
    model = Song


class SongView(DetailView):
    model = Song


class SongCreate(CreateView):
    model = Song
    fields = ['song_name', 'song_artist', 'release_year']
    success_url = reverse_lazy('song_list')


class SongUpdate(UpdateView):
    model = Song
    fields = ['song_name', 'song_artist', 'release_year']
    success_url = reverse_lazy('song_list')


class SongDelete(DeleteView):
    model = Song
    success_url = reverse_lazy('song_list')


# Formatting the duration time.
# Takes milliseconds as parameter and returns a string "m:ss"
# (seconds are zero-padded; minutes are not).
def MsFormat(milliseconds):
    dur_s = (milliseconds/1000)%60
    dur_s = int(dur_s)
    if dur_s < 10:
        dur_s = "0{}".format(dur_s)
    dur_m = (milliseconds/(1000*60))%60
    dur_m = int(dur_m)
    dur = "{}:{}".format(dur_m, dur_s)
    return dur


def TrackView(request, tracksyear):
    """Fetch the top-100 Spotify tracks for *tracksyear* and render them.

    NOTE(review): the hard-coded API credentials below should be moved to
    settings/environment variables and rotated — they are exposed in VCS.
    NOTE(review): the whole Track table is wiped and refilled per request,
    so concurrent requests will interfere with each other.
    """
    Track.objects.all().delete() #Clear old info
    query = 'year:{}'.format(tracksyear)
    #Spotify developer keys
    cid = '8f91d5aff7b54e1e93daa49f123d9ee9'
    secret = 'f23421ee54b144cabeab9e2dbe9104a7'
    client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
    sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
    #Lists for counting year averages
    l_dance = []
    l_en = []
    l_aco = []
    l_val = []
    # Two pages of 50 results (offsets 0 and 50).
    for i in range(0,100,50):
        track_results = sp.search(q=query, type='track', limit=50,offset=i)
        # NOTE(review): the inner loop reuses the name `i` (shadowing the
        # page offset) and `id` shadows the builtin; harmless here but
        # worth renaming eventually.
        for i, t in enumerate(track_results['tracks']['items']):
            id = t['id']
            artist = t['artists'][0]['name']
            song = t['name']
            dur_ms = t['duration_ms']
            pop = t['popularity']
            dur = MsFormat(dur_ms)
            # One extra API call per track for its audio features.
            trackinfo = sp.audio_features(id)
            dance = trackinfo[0]['danceability']
            en = trackinfo[0]['energy']
            key = trackinfo[0]['key']
            loud = trackinfo[0]['loudness']
            spee = trackinfo[0]['speechiness']
            aco = trackinfo[0]['acousticness']
            inst = trackinfo[0]['instrumentalness']
            live = trackinfo[0]['liveness']
            val = trackinfo[0]['valence']
            temp = trackinfo[0]['tempo']
            l_dance.append(dance)
            l_en.append(en)
            l_aco.append(aco)
            l_val.append(val)
            Track.objects.create(track_id = id,
                                 track_artist = artist,
                                 track_name = song,
                                 track_duration = dur,
                                 track_popularity = pop,
                                 track_danceability = dance,
                                 track_energy = en,
                                 track_key = key,
                                 track_loudness = loud,
                                 track_speechiness = spee,
                                 track_acousticness = aco,
                                 track_instrumentalness = inst,
                                 track_liveness = live,
                                 track_valence = val,
                                 track_tempo = temp)
    # Scale 0-1 feature averages to percentages for display.
    avgdance = calculate_average(l_dance)*100
    avgene = calculate_average(l_en)*100
    avgaco = calculate_average(l_aco)*100
    avgval = calculate_average(l_val)*100
    alltracks = Track.objects.all()
    context = {'alltracks': alltracks,
               'year': tracksyear,
               'avgdance': avgdance,
               'avgene': avgene,
               'avgaco': avgaco,
               'avgval': avgval}
    return render(request, 'tracks.html', context)


#View for each track detailed information
class Analysis(DetailView):
    model = Track


#Takes a list (of numbers) as parameter, returns the average
# NOTE(review): raises ZeroDivisionError for an empty list — callers pass
# the lists filled from Spotify search results, so presumably non-empty;
# confirm before reuse.
def calculate_average(num):
    sum_num = 0
    for t in num:
        sum_num = sum_num + t
    avg = sum_num / len(num)
    return avg


#View for analytics
def YearAnalysis(request):
    """Aggregate audio features per year (1980-2019) and track superlatives.

    NOTE(review): same hard-coded credential issue as TrackView. Also makes
    roughly 100 feature lookups per year (~4000 API calls per request), so
    this view is extremely slow and rate-limit prone.
    """
    #Spotify developer keys
    cid = '8f91d5aff7b54e1e93daa49f123d9ee9'
    secret = 'f23421ee54b144cabeab9e2dbe9104a7'
    client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
    sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
    #Lists for saving yearly averages
    dance = []
    en = []
    aco = []
    val = []
    years = []
    most_populars = []
    # Best-so-far trackers maintained across ALL years (popularity resets
    # per year; these do not).
    most_danceable = ""
    best_dance = 0
    happiest = ""
    best_val = 0
    most_acoustic = ""
    best_aco = 0
    most_energetic = ""
    best_en = 0
    for year in range (1980, 2020):
        bestpop = 0
        mostpop = ""
        l_dance = []
        l_en = []
        l_aco = []
        l_val = []
        for i in range(0,100,50):
            query = 'year:{}'.format(year)
            track_results = sp.search(q=query, type='track', limit=50, offset=i)
            for i, t in enumerate(track_results['tracks']['items']):
                #Popularity check
                pop = t['popularity']
                if pop > bestpop:
                    mostpop = "{} by {}. Popularity: {}.".format(t['name'], t['artists'][0]['name'], pop)
                    bestpop = pop
                elif pop == bestpop:
                    # Ties are appended rather than replaced.
                    mostpop = mostpop + " AND {} by {}. Popularity: {}.".format(t['name'], t['artists'][0]['name'], pop)
                id = t['id']
                trackinfo = sp.audio_features(id)
                d = trackinfo[0]['danceability']
                e = trackinfo[0]['energy']
                a = trackinfo[0]['acousticness']
                v = trackinfo[0]['valence']
                l_dance.append(d)
                l_en.append(e)
                l_aco.append(a)
                l_val.append(v)
                if d > best_dance:
                    most_danceable = "{} by {}. ({}) Danceability: {}.".format(t['name'], t['artists'][0]['name'], year, d)
                    best_dance = d
                elif d == best_dance:
                    most_danceable = most_danceable + " AND {} by {}. ({}) Danceability: {}.".format(t['name'], t['artists'][0]['name'], year, d)
                if e > best_en:
                    most_energetic = "{} by {}. ({}) Energy: {}.".format(t['name'], t['artists'][0]['name'], year, e)
                    best_en = e
                elif e == best_en:
                    most_energetic = most_energetic + " AND {} by {}. ({}) Energy: {}.".format(t['name'], t['artists'][0]['name'], year, e)
                if a > best_aco:
                    most_acoustic = "{} by {}. ({}) Acousticness: {}.".format(t['name'], t['artists'][0]['name'], year, a)
                    best_aco = a
                elif a == best_aco:
                    most_acoustic = most_acoustic + " AND {} by {}. ({}) Acousticness: {}.".format(t['name'], t['artists'][0]['name'], year, a)
                if v > best_val:
                    happiest = "{} by {}. ({}) Valence: {}.".format(t['name'], t['artists'][0]['name'], year, v)
                    best_val = v
                elif v == best_val:
                    happiest = happiest + " AND {} by {}. ({}) Valence: {}.".format(t['name'], t['artists'][0]['name'], year, v)
        #Calculate year averages and add to lists
        dance.append(calculate_average(l_dance))
        en.append(calculate_average(l_en))
        aco.append(calculate_average(l_aco))
        val.append(calculate_average(l_val))
        years.append(year)
        most_populars.append(mostpop)
    #Zip year and most popular song to a list of 2-valued tuples
    yearly_populars = zip(years, most_populars)
    context = {"years": years,
               "danceability": dance,
               "energy": en,
               "acousticness": aco,
               "valence": val,
               "yearly_populars": yearly_populars,
               "most_acoustic": most_acoustic,
               "most_energetic": most_energetic,
               "most_danceable": most_danceable,
               "happiest": happiest}
    return render(request, 'analysis.html', context)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
lukasld/Flask-Video-Editor
refs/heads/main
{"/app/api/VideoProcessing.py": ["/app/api/decorators.py", "/app/api/errors.py"], "/app/api/videoApi.py": ["/app/api/VideoProcessing.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"]}
└── ├── app │ ├── __init__.py │ ├── api │ │ ├── VideoProcessing.py │ │ ├── __init__.py │ │ ├── decorators.py │ │ ├── errors.py │ │ ├── help.py │ │ ├── utils.py │ │ └── videoApi.py │ ├── docs │ │ └── __init__.py │ └── main │ └── errors.py └── config.py
/app/__init__.py
from flask import Flask from config import config from flask_caching import Cache from flask_swagger_ui import get_swaggerui_blueprint VIDEO_EXTENSION=None VIDEO_WIDTH=None VIDEO_HEIGHT=None VIDEO_UPLOAD_PATH=None FRAMES_UPLOAD_PATH=None IMG_EXTENSION=None HELP_MSG_PATH=None CACHE=None def create_app(config_name): global VIDEO_EXTENSION global VIDEO_WIDTH global VIDEO_HEIGHT global VIDEO_UPLOAD_PATH global FRAMES_UPLOAD_PATH global IMG_EXTENSION global HELP_MSG_PATH global CACHE app = Flask(__name__) app.config.from_object(config[config_name]) config[config_name].init_app(app) cache = Cache(config={"CACHE_TYPE": "filesystem", "CACHE_DIR": app.root_path + '/static/cache'}) cache.init_app(app) CACHE = cache VIDEO_EXTENSION = app.config["VIDEO_EXTENSION"] VIDEO_WIDTH = int(app.config["VIDEO_WIDTH"]) VIDEO_HEIGHT = int(app.config["VIDEO_HEIGHT"]) IMG_EXTENSION = app.config["IMG_EXTENSION"] VIDEO_UPLOAD_PATH = app.root_path + '/static/uploads/videos' FRAMES_UPLOAD_PATH = app.root_path + '/static/uploads/frames' HELP_MSG_PATH = app.root_path + '/static/helpmessages' #TODO: video max dimensions, video max length from .main import main as main_blueprint app.register_blueprint(main_blueprint) from .api import api as api_blueprint app.register_blueprint(api_blueprint, url_prefix='/videoApi/v1') from .docs import swagger_ui app.register_blueprint(swagger_ui, url_prefix="/docs") return app
/app/api/VideoProcessing.py
from werkzeug.utils import secure_filename from functools import partial import subprocess as sp import time import skvideo.io import numpy as np import threading import ffmpeg import shlex import cv2 import re from PIL import Image from werkzeug.datastructures import FileStorage as FStorage from .. import VIDEO_EXTENSION, VIDEO_WIDTH, VIDEO_HEIGHT, \ VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION from . import utils from . errors import IncorrectVideoFormat, InvalidFilterParams, InvalidAPIUsage from . decorators import exception_handler FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 3 FRAME_WH = (VIDEO_WIDTH, VIDEO_HEIGHT) FFMPEG_COMMAND = 'ffmpeg -i pipe: -f rawvideo -pix_fmt bgr24 -an -sn pipe: -loglevel quiet' ID_LEN = 32 class Frame: def __init__(self, id=None): self.id = id @exception_handler(ex=IncorrectVideoFormat, type=2) def from_bytes(self, in_bytes: bytes) -> np.ndarray: """ """ frame_arr = np.frombuffer(in_bytes, np.uint8) f_arr = frame_arr.reshape([VIDEO_HEIGHT, VIDEO_WIDTH, 3]) return utils.bgr_to_rgb(f_arr) def f_save(self, frame: np.ndarray, frame_id: str) -> None: upload_path = utils.create_frame_path(frame_id) if utils.is_rgb(frame): Image.fromarray(frame).save(upload_path) return utils.img_from_greyscale(frame).save(upload_path) return def get_by_idx(self, frame_idx): vid = utils.create_vid_path(self.id) cap = cv2.VideoCapture(vid) cap.set(1, frame_idx) _, frame = cap.read() return frame class VideoUploader(Frame): def __init__(self): id = utils.id_generator(ID_LEN) super().__init__(id) self.frame_count = 0 def upload_from_bytestream(self, byte_stream: FStorage): video_f_path = utils.create_vid_path(self.id) sk_writer = utils.create_sk_video_writer(video_f_path) sh_command = shlex.split(FFMPEG_COMMAND) process = sp.Popen(sh_command, stdin=sp.PIPE, stdout=sp.PIPE, bufsize=10**8) thread = threading.Thread(target=self._writer, args=(process, byte_stream, )) thread.start() while True: in_bytes = process.stdout.read(FRAME_SIZE) if not in_bytes: 
break frame = self.from_bytes(in_bytes) self.frame_count += 1 if self.frame_count == 1: self.f_save(frame, self.id) sk_writer.writeFrame(frame) thread.join() sk_writer.close() def _writer(self, process, byte_stream): for chunk in iter(partial(byte_stream.read, 1024), b''): process.stdin.write(chunk) try: process.stdin.close() except (BrokenPipeError): pass class Filter: def __init__(self, img=None): self.img = img def applyCanny(self, params): if 'thresh1' in params and 'thresh2' in params: gs_img = self.applyGreyScale(params) return cv2.Canny(gs_img, int(params['thresh1']), int(params['thresh2'])) raise InvalidFilterParams(3, 'canny') def applyGauss(self, params): if 'ksize_x' and 'ksize_y' in params and \ params['ksize_x'] % 2 != 0 and \ params['ksize_y'] % 2 != 0: g_img = self.img.copy() if np.ndim(g_img) == 3: g_img = utils.bgr_to_rgb(g_img) return cv2.GaussianBlur(g_img, (int(params["ksize_x"]), int(params["ksize_y"])), 0) raise InvalidFilterParams(3, 'gauss') def applyGreyScale(self, _): c_img = self.img.copy() return cv2.cvtColor(c_img, cv2.COLOR_RGB2GRAY) def applyLaplacian(self, params): gs_img = self.applyGreyScale(params) return cv2.Laplacian(gs_img, cv2.CV_8U) def run_func(self, params): if params["type"] in self.filter_map: func = self.filter_map[params["type"]].__get__(self, type(self)) return func(params) raise InvalidFilterParams(2) def _default(self, _): return utils.bgr_to_rgb(self.img) filter_map = {'canny': applyCanny, 'gauss': applyGauss, 'greyscale': applyGreyScale, 'laplacian': applyLaplacian, '': _default} class VideoDownloader(Frame, Filter): def __init__(self, fps, vid_range=None): Frame.__init__(self) Filter.__init__(self) self.fps = fps self.vid_range = vid_range self.curr_f_frame = None if vid_range: self.range_min = vid_range[0] self.range_max = vid_range[1] def download(self, s_id, tot_video_frames, params): f_vid_name = f'{s_id}_{params["type"]}' video_f_path = utils.create_vid_path(f_vid_name) local_vid = 
cv2.VideoCapture(utils.create_vid_path(s_id)) vid_writer = utils.create_sk_video_writer(video_f_path, self.fps) for i in range(tot_video_frames-1): utils.set_cache_f_count(s_id, 'd', i) _, curr_frame = local_vid.read() if curr_frame is None: break self.img = curr_frame f_frame = self._filter_apply(i, params) vid_writer.writeFrame(f_frame) vid_writer.close() return f_vid_name def _filter_apply(self, i, params): """ we simply check if a range is given, then if we get a gs-img from the filter we add three dimensions """ if self.vid_range: if(i >= self.vid_range[0] and i <= self.vid_range[1]): f_frame = self.run_func(params) if not utils.is_rgb(f_frame): return np.dstack(3*[f_frame]) return f_frame else: return self.run_func({"type":""}) else: return self.run_func(params)
/app/api/__init__.py
from flask import Blueprint api = Blueprint('videoApi', __name__) from . import videoApi, errors, help
/app/api/decorators.py
from flask import request, jsonify from functools import wraps from .errors import InvalidAPIUsage, InvalidFilterParams, IncorrectVideoFormat """ Almost like an Architect - makes decorations """ def decorator_maker(func): def param_decorator(fn=None, does_return=None, req_c_type=None, req_type=None, arg=None, session=None): def deco(fn): @wraps(fn) def wrapper(*args, **kwargs): result = func(does_return, req_c_type, req_type, arg, session) if does_return: return fn(result) return fn(*args, **kwargs) return wrapper if callable(fn): return deco(fn) return deco return param_decorator """ Checks if user input is not out of bounds, and also Content-Type """ def wrap_param_check(does_return, req_c_type, req_type, arg, session): check_content_type(req_c_type) return check_correct_filter_params(session) def check_content_type(req_c_type): if not request.content_type.startswith(req_c_type): raise InvalidAPIUsage(f'Content-Type should be of type: {req_c_type}', 400) def check_correct_filter_params(session): if request.data: data = request.get_json() f_params = data['filter_params'] if 'filter_params' not in data: raise InvalidFilterParams(1) elif 'type' not in f_params: raise InvalidFilterParams(1) if 'download' in request.url: if 'fps' not in data: raise InvalidFilterParams(1) if 'max_f' in f_params and 'min_f' in f_params: max_fr = session['video_frame_count'] min_f_raw = f_params['min_f'] max_f_raw = f_params['max_f'] if min_f_raw == "": min_f_raw = 0 if max_f_raw == "": max_f_raw = max_fr min_f = _check_for_req_type(int, min_f_raw, 4) max_f = _check_for_req_type(int, max_f_raw, 4) a = check_bounds(min_f_raw, max_fr) b = check_bounds(max_f_raw, max_fr) return sorted([a, b]) def _check_for_req_type(req_type, val, ex): try: req_type(val) except Exception: raise InvalidFilterParams(ex) return val parameter_check = decorator_maker(wrap_param_check) """ Checks if user input is not out of bounds, and also Content-Type """ def wrap_url_arg_check(does_return, req_c_type, 
req_type, arg, session): check_arg_urls(req_type, arg) frame_idx = request.view_args[arg] return check_bounds(frame_idx, session['video_frame_count']) def check_arg_urls(req_type, arg): try: req_type(request.view_args[arg]) except ValueError: raise InvalidAPIUsage(f'Content-Type should be of type: {req_type.__name__}', 400) def check_bounds(frame_idx, max_frames): f_max = int(max_frames) f_idx = int(frame_idx) if f_idx > f_max: f_idx = f_max-50 elif f_idx < 1: f_idx = 1 return f_idx url_arg_check = decorator_maker(wrap_url_arg_check) """ Checks Video Metadata """ def wrap_metadata_check(does_return, req_c_type, req_type, arg, session): check_metadata(req_type) def check_metadata(req_type): byteStream = request.files['file'] vid_type = byteStream.__dict__['headers'].get('Content-Type') if vid_type != req_type: raise IncorrectVideoFormat(1) metadata_check = decorator_maker(wrap_metadata_check) """ Excpetion Handler for non-Endpoints """ def exception_handler(fn=None, ex=None, type=None, pas=False): def deco(fn): @wraps(fn) def wrapper(*args, **kwargs): try: fn(*args, **kwargs) except Exception: if not pas: raise ex(type) pass return fn(*args, **kwargs) return wrapper if callable(fn): return deco(fn) return deco
/app/api/errors.py
import sys import traceback from flask import jsonify, request from . import api class InvalidAPIUsage(Exception): status_code = 400 def __init__(self, message='', status_code=None): super().__init__() self.message = message self.path = request.path if status_code is None: self.status_code = InvalidAPIUsage.status_code def to_dict(self): rv = {} rv['path'] = self.path rv['status'] = self.status_code rv['message'] = self.message return rv class IncorrectVideoFormat(InvalidAPIUsage): def __init__(self, message_id): super().__init__() self.message = self.msg[message_id] msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed', 2:'Incorrect video dimensions: only 720p supported (1280*720)'} class InvalidFilterParams(InvalidAPIUsage): def __init__(self, message_id, filter_name=''): super().__init__() self.message = self.msg(message_id, filter_name) def msg(self, id, filter_name): # TODO:Lukas [07252021] messges could be stored in static files as JSON avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \ or for default preview, {"filter_params":{"type":""}}', 2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/', 3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/', 4:f'Incorrect download parameters: for more go to /api/v1/help/download/', } return avail_msg[id] @api.errorhandler(InvalidAPIUsage) def invalid_api_usage(e): return jsonify(e.to_dict()), 400
/app/api/help.py
from flask import jsonify, request, send_from_directory from . decorators import parameter_check from . import api from ..import HELP_MSG_PATH import json AV_EP = ["upload", "preview", "download", "stats", "filters"] AV_FILTERS = ["canny", "greyscale", "laplacian", "gauss"] @api.route('/help/', methods=['GET']) @api.route('/help/<endpts>/', methods=['GET']) @api.route('/help/filters/<filter_type>/', methods=['GET']) @parameter_check(req_c_type='application/json') def help(endpts=None, filter_type=None): if endpts and endpts in AV_EP: return jsonify(load_json_from_val(endpts)), 200 elif filter_type and filter_type in AV_FILTERS: return jsonify(load_json_from_val(filter_type)), 200 else: return jsonify(load_json_from_val('help')), 200 def load_json_from_val(val): f = open(HELP_MSG_PATH+f'/{val}.json') return json.load(f)
/app/api/utils.py
import cv2 import math import string import random import numpy as np import skvideo.io from PIL import Image from .. import VIDEO_EXTENSION, VIDEO_UPLOAD_PATH, \ FRAMES_UPLOAD_PATH, IMG_EXTENSION, CACHE FPS = 23.98 SK_CODEC = 'libx264' def create_vid_path(name): return f'{VIDEO_UPLOAD_PATH}/{name}{VIDEO_EXTENSION}' def create_frame_path(name): return f'{FRAMES_UPLOAD_PATH}/{name}{IMG_EXTENSION}' def framecount_from_vid_id(video_id): video_path = create_vid_path(video_id) cap = cv2.VideoCapture(video_path) return math.floor(cap.get(7)) def id_generator(size, chars=string.ascii_lowercase + string.digits) -> str: return ''.join(random.choice(chars) for _ in range(size)) def create_sk_video_writer(video_f_path, fps = None): if not fps : fps = FPS return skvideo.io.FFmpegWriter(video_f_path, outputdict={'-c:v':SK_CODEC, '-profile:v':'main', '-pix_fmt': 'yuv420p', '-r':str(fps)}) def set_cache_f_count(s_id: str, ud: str, fc: str) -> None: CACHE.set(f'{s_id}_{ud}', fc) def bgr_to_rgb(frame: np.ndarray) -> np.ndarray: return frame[:, :, ::-1] def is_greyscale(frame) -> bool: return frame.ndim == 2 def is_rgb(frame) -> bool: return frame.ndim == 3 def img_from_greyscale(frame: np.ndarray) -> Image: return Image.fromarray(frame).convert("L") def img_from_bgr(frame: np.ndarray) -> Image: return Image.fromarray(bgr_to_rgb(frame))
/app/api/videoApi.py
import os from flask import Flask, request, redirect, \ url_for, session, jsonify, send_from_directory, make_response, send_file from . import api from . import utils from .. import VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION, VIDEO_EXTENSION, CACHE from . VideoProcessing import Frame, VideoUploader, VideoDownloader, Filter from . decorators import parameter_check, url_arg_check, metadata_check from . errors import InvalidAPIUsage @api.route('/upload/', methods=['POST']) @parameter_check(does_return=False, req_c_type='multipart/form-data') @metadata_check(does_return=False, req_type='video/mp4') def upload_video(): """ uploads the video """ byteStream = request.files['file'] vu = VideoUploader() vu.upload_from_bytestream(byteStream) session['s_id'] = vu.id f_c = utils.framecount_from_vid_id(vu.id) session['video_frame_count'] = f_c session['is_uploaded'] = True return jsonify({'status' : '201', 'message' : 'video uploaded!'}), 201 @api.route('/preview/', defaults={'frame_idx':1}, methods=['GET']) @api.route('/preview/<frame_idx>/', methods=['GET', 'POST']) @parameter_check(does_return=False, req_c_type='application/json') @url_arg_check(does_return=True, req_type=int, arg='frame_idx', session=session) def preview_thumbnail(frame_idx): """ Preview a frame by index, given filter parameters """ if session.get('is_uploaded'): data = request.get_json() filter_params = data['filter_params'] session['filter_params'] = filter_params frame = Frame(session['s_id']) frame_i = frame.get_by_idx(frame_idx) filter_frame = Filter(frame_i).run_func(filter_params) frame.f_save(filter_frame, session['s_id']) return send_from_directory(directory=f'{FRAMES_UPLOAD_PATH}', path=f'{session["s_id"]}{IMG_EXTENSION}', as_attachment=True), 200 raise InvalidAPIUsage('Invalid usage: please upload a video first') @api.route('/download/', methods=['POST']) @parameter_check(does_return=True, req_c_type='application/json', session=session) def download_video(vid_range): """ Download a 
video given filter parameters """ if session.get('is_uploaded'): data = request.get_json() fps = data['fps'] filter_params = data['filter_params'] frame_count = session['video_frame_count'] vd = VideoDownloader(fps, vid_range) filter_vid = vd.download(session['s_id'], frame_count, filter_params) session['is_downloaded'] = True return send_from_directory(directory=f'{VIDEO_UPLOAD_PATH}', path=f'{filter_vid}{VIDEO_EXTENSION}', as_attachment=True), 200 raise InvalidAPIUsage('Invalid usage: please upload a video first') @api.route('/status/', methods=['GET']) @parameter_check(req_c_type='application/json') def status(): """ The progress of the user, uploaded, download / frames """ resp = {} try: if session['is_uploaded']: resp["upload"] = "done" if CACHE.get(f"{session['s_id']}_d"): d_status = CACHE.get(f"{session['s_id']}_d") resp["downloaded_frames"] = f'{d_status}/{session["video_frame_count"]}' if session["is_downloaded"]: resp["is_downloaded"] = True except KeyError: pass return jsonify({"status" : resp}), 200
/app/docs/__init__.py
from flask_swagger_ui import get_swaggerui_blueprint swagger_ui = get_swaggerui_blueprint( '/docs', '/static/swagger.json', config={ "app_name": "videoApi" } )
/app/main/errors.py
from flask import redirect, url_for, jsonify from . import main @main.app_errorhandler(404) def page_not_found(e): return jsonify(error=str(e)), 404 @main.app_errorhandler(405) def method_not_allowed(e): return jsonify(error=str(e)), 405
/config.py
import os basedir = os.path.abspath(os.path.dirname(__file__)) class Config: """ """ SECRET_KEY = os.environ.get('SECRET_KEY') FLASK_CONFIG = os.environ.get('FLASK_CONFIG') VIDEO_EXTENSION = os.environ.get('VIDEO_EXTENSION') VIDEO_WIDTH = os.environ.get('VIDEO_WIDTH') VIDEO_HEIGHT = os.environ.get('VIDEO_HEIGHT') IMG_EXTENSION = os.environ.get('IMG_EXTENSION') @staticmethod def init_app(app): pass class DevelopmentConfig(Config): """ """ DEBUG = True config = { 'development': DevelopmentConfig, 'default': DevelopmentConfig }
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
junprog/contrastive-baseline
refs/heads/main
{"/linear_eval.py": ["/models/create_linear_eval_model.py", "/utils/visualizer.py", "/datasets/cifar10.py"], "/train.py": ["/utils/contrastive_trainer.py", "/utils/simsiam_trainer.py"], "/utils/contrastive_trainer.py": ["/utils/helper.py", "/models/l2_contrastive_loss.py", "/utils/visualizer.py", "/datasets/spatial.py", "/models/siamese_net.py", "/datasets/cifar10.py"], "/utils/simsiam_trainer.py": ["/utils/helper.py", "/utils/visualizer.py", "/datasets/spatial.py", "/models/cosine_contrastive_loss.py", "/datasets/cifar10.py"]}
└── ├── datasets │ ├── cifar10.py │ └── spatial.py ├── exp.py ├── linear_eval.py ├── models │ ├── cosine_contrastive_loss.py │ ├── create_linear_eval_model.py │ ├── l2_contrastive_loss.py │ ├── siamese_net.py │ └── simple_siamese_net_tmp.py ├── train.py ├── train_val_split.py └── utils ├── contrastive_trainer.py ├── helper.py ├── simsiam_trainer.py └── visualizer.py
/datasets/cifar10.py
from typing import Callable, Optional import random from PIL import Image import numpy as np import torch import torchvision from torchvision import transforms from torchvision.datasets import CIFAR10 np.random.seed(765) random.seed(765) class SupervisedPosNegCifar10(torch.utils.data.Dataset): def __init__(self, dataset, phase): # split by some thresholds here 80% anchors, 20% for posnegs lengths = [int(len(dataset)*0.8), int(len(dataset)*0.2)] self.anchors, self.posnegs = torch.utils.data.random_split(dataset, lengths) if phase == 'train': self.anchor_transform = transforms.Compose([transforms.Resize(64), transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64), transforms.RandomHorizontalFlip(0.5), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) self.posneg_transform = transforms.Compose([transforms.Resize(64), transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64), transforms.RandomHorizontalFlip(0.5), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) else: self.anchor_transform = transforms.Compose([transforms.Resize(64), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) self.posneg_transform = transforms.Compose([transforms.Resize(64), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) def __len__(self): return len(self.anchors) def __getitem__(self, index): anchor, label = self.anchors[index] if self.anchor_transform is not None: anchor = self.anchor_transform(anchor) # now pair this up with an image from the same class in the second stream if random.random() > 0.5: A = np.where(np.array(self.posnegs.dataset.targets) == label)[0] posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)]) posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]] target = torch.tensor([1]).long() else: A = 
np.where(np.array(self.posnegs.dataset.targets) != label)[0] posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)]) posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]] target = torch.tensor([0]).long() if self.posneg_transform is not None: posneg = self.posneg_transform(posneg) return anchor, posneg, target, label class PosNegCifar10(torch.utils.data.Dataset): def __init__(self, dataset, phase): # split by some thresholds here 80% anchors, 20% for posnegs self.dataset = dataset if phase == 'train': self.anchor_transform = transforms.Compose([transforms.Resize(64), transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64), transforms.RandomHorizontalFlip(0.5), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) self.posneg_transform = transforms.Compose([transforms.Resize(64), transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64), transforms.RandomHorizontalFlip(0.5), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) else: self.anchor_transform = transforms.Compose([transforms.Resize(64), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) self.posneg_transform = transforms.Compose([transforms.Resize(64), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) def __len__(self): return len(self.dataset) def __getitem__(self, index): anchor, label = self.dataset[index] # now pair this up with an image from the same class in the second stream if random.random() > 0.5: posneg = anchor target = torch.tensor([1]).long() else: while True: neg_idx = random.randint(0, len(self.dataset)-1) if neg_idx != index: break posneg, label = self.dataset[neg_idx] target = torch.tensor([0]).long() if self.anchor_transform is not None: anchor = self.anchor_transform(anchor) if self.posneg_transform is not None: posneg = 
self.posneg_transform(posneg) return anchor, posneg, target, label ### Simple Siamese code imagenet_mean_std = [[0.485, 0.456, 0.406],[0.229, 0.224, 0.225]] class SimSiamTransform(): def __init__(self, image_size, train, mean_std=imagenet_mean_std): self.train = train if self.train: image_size = 224 if image_size is None else image_size # by default simsiam use image size 224 p_blur = 0.5 if image_size > 32 else 0 # exclude cifar # the paper didn't specify this, feel free to change this value # I use the setting from simclr which is 50% chance applying the gaussian blur # the 32 is prepared for cifar training where they disabled gaussian blur self.transform = transforms.Compose([ transforms.RandomResizedCrop(image_size, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4,0.4,0.4,0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([transforms.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=p_blur), transforms.ToTensor(), transforms.Normalize(*mean_std) ]) else: self.transform = transforms.Compose([ transforms.Resize(int(image_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256 transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize(*mean_std) ]) def __call__(self, x): x1 = self.transform(x) x2 = self.transform(x) return x1, x2 def get_simsiam_dataset(args, phase, download=True, debug_subset_size=None): if phase == 'train': train = True transform = SimSiamTransform(args.crop_size, train) elif phase == 'val': train = False transform = SimSiamTransform(args.crop_size, train) elif phase == 'linear_train': train = True transform = transforms.Compose([ transforms.RandomResizedCrop(args.crop_size, scale=(0.08, 1.0), ratio=(3.0/4.0,4.0/3.0), interpolation=Image.BICUBIC), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(*imagenet_mean_std) ]) elif phase == 'linear_val': train = False transform = transforms.Compose([ 
transforms.Resize(int(args.crop_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256 transforms.CenterCrop(args.crop_size), transforms.ToTensor(), transforms.Normalize(*imagenet_mean_std) ]) dataset = torchvision.datasets.CIFAR10(root="CIFAR10_Dataset", train=train, transform=transform, download=download) if debug_subset_size is not None: dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size)) # take only one batch dataset.classes = dataset.dataset.classes dataset.targets = dataset.dataset.targets return dataset
/datasets/spatial.py
# in : original image # out : cropped img1 (anchor) # cropped img2 (compete) # target (positive img1 - img2 : 1, negative img1 - img2 : 0) import os from glob import glob import random import numpy as np from PIL import Image from PIL import ImageFilter import torch import torch.utils.data as data import torchvision.transforms.functional as F from torchvision import transforms random.seed(765) def divide_patches(img, row, col): patche_size_w = int(img.size[0] / col) patche_size_h = int(img.size[1] / row) patches = [] for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)): if cnt_i == row: break for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)): if cnt_j == col: break box = (j, i, j+patche_size_w, i+patche_size_h) patches.append(img.crop(box)) return patches def create_pos_pair(patches): idx = random.randint(0, len(patches)-1) img1 = patches[idx] img2 = patches[idx] target = np.array([1]) return img1, img2, target def create_neg_pair(patches): idx = random.sample(range(0, len(patches)-1), k=2) img1 = patches[idx[0]] img2 = patches[idx[1]] target = np.array([0]) return img1, img2, target def random_crop(im_h, im_w, crop_h, crop_w): res_h = im_h - crop_h res_w = im_w - crop_w i = random.randint(0, res_h) j = random.randint(0, res_w) return i, j, crop_h, crop_w class GaussianBlur(object): """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709""" def __init__(self, sigma=[.1, 2.]): self.sigma = sigma def __call__(self, x): sigma = random.uniform(self.sigma[0], self.sigma[1]) x = x.filter(ImageFilter.GaussianBlur(radius=sigma)) return x class PosNegSpatialDataset(data.Dataset): # divide_num : 3 -> 3x3= 9 paches def __init__(self, data_path, crop_size, divide_num=(3,3), aug=True): self.data_path = data_path self.im_list = sorted(glob(os.path.join(self.data_path, '*.jpg'))) self.c_size = crop_size self.d_row = divide_num[0] self.d_col = divide_num[1] if aug: self.aug = transforms.Compose([ transforms.CenterCrop(self.c_size), 
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5), transforms.RandomHorizontalFlip() ]) else: self.aug = transforms.CenterCrop(self.c_size) self.trans = transforms.Compose([ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) def __len__(self): return len(self.im_list) def __getitem__(self, index): img_path = self.im_list[index] img = Image.open(img_path).convert('RGB') patches = divide_patches(img, self.d_row, self.d_col) if random.random() > 0.5: img1, img2, target = create_pos_pair(patches) else: img1, img2, target = create_neg_pair(patches) img1 = self.aug(img1) img2 = self.aug(img2) target = torch.from_numpy(target).long() img1 = self.trans(img1) img2 = self.trans(img2) return img1, img2, target, None class SpatialDataset(data.Dataset): # divide_num : 3 -> 3x3= 9 paches def __init__(self, phase, data_path, crop_size, divide_num=(3,3), aug=True): with open(os.path.join(data_path, '{}.txt'.format(phase)), 'r') as f: im_list = f.readlines() self.im_list = [im_name.replace('\n', '') for im_name in im_list] self.c_size = crop_size self.d_row = divide_num[0] self.d_col = divide_num[1] self.trans = transforms.Compose([ transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) def __len__(self): return len(self.im_list) def __getitem__(self, index): img_path = self.im_list[index] img = Image.open(img_path).convert('RGB') patches = divide_patches(img, self.d_row, self.d_col) img1, img2, label = create_pos_pair(patches) assert img1.size == img2.size wd, ht = img1.size i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size) img1 = F.crop(img1, i, j, h, w) img2 = F.crop(img2, i, 
j, h, w) img1 = self.trans(img1) img2 = self.trans(img2) imgs = (img1, img2) return imgs, label
/exp.py
import torch import torchvision from PIL import Image from matplotlib import pyplot as plt import random model = torchvision.models.__dict__['vgg19']() print(model) img = torch.rand(1,3,256,256) out = model.features(img) print(out.size()) import torchvision.transforms as trans crop = trans.RandomCrop(224) img = torch.rand(1,3,256,256) out = crop(img) print(out.size()) def divide_patches(img, row, col): patche_size_w = int(img.size[0] / col) patche_size_h = int(img.size[1] / row) patches = [] for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)): if cnt_i == row: break for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)): if cnt_j == col: break box = (j, i, j+patche_size_w, i+patche_size_h) patches.append(img.crop(box)) return patches def display_images( images: [Image], row=3, col=3, width=10, height=4, max_images=15, label_wrap_length=50, label_font_size=8): if not images: print("No images to display.") return if len(images) > max_images: print(f"Showing {max_images} images of {len(images)}:") images=images[0:max_images] height = max(height, int(len(images)/col) * height) plt.figure(figsize=(width, height)) for i, image in enumerate(images): plt.subplot(row, col, i + 1) plt.imshow(image) plt.show() image = Image.open("/mnt/hdd02/shibuya_scramble/image_000294.jpg").convert("RGB") p = divide_patches(image, 2, 3) print(len(p)) display_images(p, row=2, col=3) def create_pos_pair(patches): idx = random.randint(0, len(patches)-1) img1 = patches[idx] img2 = patches[idx] label = 1 return img1, img2, label def create_neg_pair(patches): idx = random.sample(range(0, len(patches)-1), k=2) img1 = patches[idx[0]] img2 = patches[idx[1]] label = 0 return img1, img2, label def get_img(img): patches = divide_patches(img, 3, 2) if random.random() > 0.5: img1, img2, label = create_pos_pair(patches) else: img1, img2, label = create_neg_pair(patches) return img1, img2, label res = [] for i in range(10): img1, img2, label = get_img(image) flag = False if img1 == 
img2: flag = True res.append([flag, label]) print(res)
/linear_eval.py
import os
import argparse
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models

from datasets.cifar10 import get_simsiam_dataset
from models.create_linear_eval_model import LinearEvalModel
from utils.visualizer import AccLossGraphPloter
from utils.logger import setlogger

args = None


def parse_args():
    """Command-line options for linear evaluation of a frozen pretrained encoder."""
    parser = argparse.ArgumentParser(description='Test ')
    parser.add_argument('--save-dir', default='/mnt/hdd02/contrastive-learn/0113-193048',
                        help='model directory')
    parser.add_argument('--device', default='0', help='assign device')
    parser.add_argument('--arch', default='vgg19', help='model architecture')
    parser.add_argument('--max-epoch', default=100, type=int, help='train epoch')
    parser.add_argument('--crop-size', default=224, type=int, help='input size')
    parser.add_argument('--batch-size', default=512, type=int, help='input size')
    parser.add_argument('--lr', default=1e-1, type=float, help='learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip()  # set vis gpu
    plotter = AccLossGraphPloter(args.save_dir)
    setlogger(os.path.join(args.save_dir, 'eval.log'))  # set logger

    datasets = {x: get_simsiam_dataset(args, x) for x in ['linear_train', 'linear_val']}
    dataloaders = {x: DataLoader(datasets[x],
                                 batch_size=args.batch_size,
                                 shuffle=(True if x == 'linear_train' else False),
                                 num_workers=8,
                                 pin_memory=(True if x == 'linear_train' else False))
                   for x in ['linear_train', 'linear_val']}

    device = torch.device('cuda')
    model = LinearEvalModel(arch=args.arch)
    model.weight_init(args.save_dir, device, args.arch)  # initialize & freeze encoder

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[40, 60, 80], gamma=0.1)

    ## Training & Test Loop
    model.to(device)
    for epoch in range(args.max_epoch):
        model.train()
        losses, acc, step, total = 0., 0., 0., 0.
        for data, target in dataloaders['linear_train']:
            data, target = data.to(device), target.to(device)
            logits = model(data)
            optimizer.zero_grad()
            loss = criterion(logits, target)
            loss.backward()
            losses += loss.item()
            optimizer.step()

            # top-1 prediction (softmax is monotonic, kept for clarity)
            pred = F.softmax(logits, dim=-1).max(-1)[1]
            acc += pred.eq(target).sum().item()
            step += 1
            total += target.size(0)
        # BUGFIX: MultiStepLR milestones [40, 60, 80] are epochs, so step the
        # scheduler once per epoch -- the original stepped it per batch, which
        # decayed the learning rate almost immediately.
        scheduler.step()

        tr_loss = losses / step
        tr_acc = acc / total * 100.
        logging.info('[Train Epoch: {0:2d}], loss: {1:.3f}, acc: {2:.3f}'.format(epoch, tr_loss, tr_acc))

        model.eval()
        losses, acc, step, total = 0., 0., 0., 0.
        with torch.no_grad():
            for data, target in dataloaders['linear_val']:
                data, target = data.to(device), target.to(device)
                logits = model(data)
                loss = criterion(logits, target)
                losses += loss.item()

                pred = F.softmax(logits, dim=-1).max(-1)[1]
                acc += pred.eq(target).sum().item()
                step += 1
                total += target.size(0)
        vl_loss = losses / step
        vl_acc = acc / total * 100.
        logging.info('[Test Epoch: {0:2d}], loss: {1:.3f} acc: {2:.2f}'.format(epoch, vl_loss, vl_acc))

        plotter(epoch, tr_acc, vl_acc, tr_loss, vl_loss, args.arch)
/models/cosine_contrastive_loss.py
import torch import torch.nn as nn import torch.nn.functional as F def D(p, z, version='simplified'): # negative cosine similarity if version == 'original': z = z.detach() # stop gradient p = F.normalize(p, dim=1) # l2-normalize z = F.normalize(z, dim=1) # l2-normalize return -(p*z).sum(dim=1).mean() elif version == 'simplified': return - F.cosine_similarity(p, z.detach(), dim=-1).mean() else: raise Exception class CosineContrastiveLoss(nn.Module): def __init__(self): super().__init__() def forward(self, z1, z2, p1, p2): if z1.dim() != 2: z1 = z1.squeeze() if z2.dim() != 2: z2 = z2.squeeze() if p1 is not None or p2 is not None: loss = D(p1, z2) / 2 + D(p2, z1) / 2 else: loss = D(z1, z2) return loss
/models/create_linear_eval_model.py
import os from collections import OrderedDict import torch import torch.nn as nn import torchvision.models as models class LinearEvalModel(nn.Module): def __init__(self, arch='vgg19', dim=512, num_classes=10): super().__init__() if arch == 'vgg19': self.features = models.vgg19().features if arch == 'vgg19_bn': self.features = models.vgg19_bn().features elif arch == 'resnet18': resnet18 = models.resnet18(pretrained=False) self.features = nn.Sequential(*list(resnet18.children())[:-1]) self.avg_pool = nn.AdaptiveAvgPool2d((1,1)) self.fc = nn.Linear(dim, num_classes) def weight_init(self, weight_path, device, arch): state_dict = torch.load(os.path.join(weight_path, 'best_model.pth'), device) new_state_dict = OrderedDict() if 'resnet' in arch: for k, v in state_dict.items(): if 'encoder' in k: k = k.replace('encoder.', '') new_state_dict[k] = v self.features.load_state_dict(new_state_dict) elif 'vgg' in arch: for k, v in state_dict.items(): if 'encoder' in k: k = k.replace('encoder.0.', '') new_state_dict[k] = v self.features.load_state_dict(new_state_dict) for m in self.features.parameters(): m.requires_grad = False def forward(self, x): x = self.features(x) x = self.avg_pool(x) x = x.squeeze() out = self.fc(x) return out
/models/l2_contrastive_loss.py
import torch import torch.nn as nn import torch.nn.functional as F class L2ContrastiveLoss(nn.Module): """ Contrastive loss Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise Args : output1 & output2 : [N, dim] target : [N] """ def __init__(self, margin=1.0): super().__init__() self.margin = margin self.eps = 1e-9 def forward(self, output1, output2, target, size_average=True): target = target.squeeze() distances = (output2 - output1).pow(2).sum(1) # squared distances losses = 0.5 * (target.float() * distances + (1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2)) return losses.mean() if size_average else losses.sum()
/models/siamese_net.py
import torch import torch.nn as nn class SiameseNetwork(nn.Module): def __init__(self, model, pretrained=False, simple_model=False): super(SiameseNetwork, self).__init__() self.simple_model = simple_model if simple_model: self.features = nn.Sequential(nn.Conv2d(3, 32, 5), nn.PReLU(), nn.MaxPool2d(2, stride=2), nn.Conv2d(32, 64, 5), nn.PReLU(), nn.MaxPool2d(2, stride=2), nn.Conv2d(64, 64, 5), nn.PReLU(), nn.MaxPool2d(2, stride=2)) self.classifier = nn.Sequential(nn.Linear(64 * 4 * 4, 256), nn.PReLU(), nn.Linear(256, 256), nn.PReLU(), nn.Linear(256, 2)) else: if pretrained: self.encoder = model(pretrained=True) self.encoder.classifier = nn.Sequential(*[self.encoder.classifier[i] for i in range(6)]) self.encoder.classifier.add_module('out', nn.Linear(4096, 2)) else: self.encoder = model(num_classes=2) def forward_once(self, x): if self.simple_model: output = self.features(x) output = output.view(output.size()[0], -1) output = self.classifier(output) else: output = self.encoder(x) return output def forward(self, input1, input2): output1 = self.forward_once(input1) output2 = self.forward_once(input2) return output1, output2
/models/simple_siamese_net_tmp.py
import torch import torch.nn as nn class projection_MLP(nn.Module): def __init__(self, in_dim=512, hidden_dim=512, out_dim=512): # bottleneck structure super().__init__() self.layers = nn.Sequential( nn.Linear(in_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, hidden_dim), nn.ReLU(), nn.Linear(hidden_dim, out_dim) ) def forward(self, x): if x.dim() != 2: x = x.squeeze() x = self.layers(x) return x class prediction_MLP(nn.Module): def __init__(self, in_dim=512, hidden_dim=256, out_dim=512): # bottleneck structure super().__init__() self.layer1 = nn.Sequential( nn.Linear(in_dim, hidden_dim), nn.ReLU(inplace=True) ) self.layer2 = nn.Linear(hidden_dim, out_dim) def forward(self, x): if x.dim() != 2: x = x.squeeze() x = self.layer1(x) x = self.layer2(x) return x class SiameseNetwork(nn.Module): def __init__(self, model, pattern_feature = 'conv-512x1x1', projection=False, prediction=False): super(SiameseNetwork, self).__init__() self.projection = projection self.prediction = prediction if pattern_feature == 'conv-512x1x1': features = model().features max_pool = nn.AdaptiveAvgPool2d((1,1)) self.encoder = nn.Sequential(features, max_pool) if projection: self.projector = projection_MLP(in_dim=512, hidden_dim=512, out_dim=512) if prediction: self.predictor = prediction_MLP(in_dim=512, out_dim=512) elif pattern_feature == 'fc-4096': features = model() self.encoder = nn.Sequential(*[self.encoder.classifier[0]]) if projection: self.projector = projection_MLP(in_dim=4096, hidden_dim=4096, out_dim=4096) if prediction: self.predictor = prediction_MLP(in_dim=4096, out_dim=4096) def forward(self, input1, input2): if self.prediction: f, h = self.encoder, self.predictor z1, z2 = f(input1), f(input2) if self.projection: z1, z2 = self.projection(input1), self.projection(input2) p1, p2 = h(z1), h(z2) else: f = self.encoder z1, z2 = f(input1), f(input2) if self.projection: z1, z2 = self.projection(input1), self.projection(input2) p1, p2 = None, None return (z1, z2), (p1, p2)
/train.py
from utils.contrastive_trainer import CoTrainer
from utils.simsiam_trainer import SimSiamTrainer
import argparse
import os
import math
import torch

args = None


def parse_args():
    """Command-line options shared by the contrastive and SimSiam trainers."""
    parser = argparse.ArgumentParser(description='Train ')
    parser.add_argument('--data-dir', default='/mnt/hdd02/process-ucf',
                        help='training data directory')
    parser.add_argument('--save-dir', default='D:/exp_results',
                        help='directory to save models.')
    parser.add_argument('--cifar10', action='store_true',
                        help='use cifar10 dataset')
    parser.add_argument('--SimSiam', action='store_true',
                        help='try Simple Siamese Net')
    parser.add_argument('--arch', type=str, default='vgg19',
                        help='the model architecture [vgg19, vgg19_bn, resnet18]')
    parser.add_argument('--pattern-feature', type=str, default='conv-512x1x1',
                        help='the feature to contrast [conv-512x1x1, fc-4096]')
    parser.add_argument('--projection', action='store_true',
                        help='use MLP projection')
    parser.add_argument('--prediction', action='store_true',
                        help='use MLP prediction')
    parser.add_argument('--mlp-bn', action='store_true',
                        help='use MLP Batch Normalization')
    parser.add_argument('--lr', type=float, default=1e-2,
                        help='the initial learning rate')
    parser.add_argument('--weight-decay', type=float, default=1e-4,
                        help='the weight decay')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='the momentum')
    parser.add_argument('--div-row', type=int, default=3,
                        help='one side`s number of pathes')
    parser.add_argument('--div-col', type=int, default=3,
                        help='one side`s number of pathes')
    # BUGFIX: help text was a copy-paste of --weight-decay's
    parser.add_argument('--aug', action='store_true',
                        help='use data augmentation')
    parser.add_argument('--margin', type=float, default=1.0,
                        help='the margin of loss function')
    parser.add_argument('--resume', default='',
                        help='the path of resume training model')
    parser.add_argument('--max-model-num', type=int, default=30,
                        help='max models num to save ')
    parser.add_argument('--check_point', type=int, default=100,
                        help='milestone of save model checkpoint')
    parser.add_argument('--max-epoch', type=int, default=300,
                        help='max training epoch')
    parser.add_argument('--val-epoch', type=int, default=10,
                        help='the num of steps to log training information')
    parser.add_argument('--val-start', type=int, default=0,
                        help='the epoch start to val')
    parser.add_argument('--batch-size', type=int, default=8,
                        help='train batch size')
    parser.add_argument('--device', default='0', help='assign device')
    parser.add_argument('--num-workers', type=int, default=8,
                        help='the num of training process')
    parser.add_argument('--crop-size', type=int, default=224,
                        help='the crop size of the train image')
    parser.add_argument('--visual-num', type=int, default=4,
                        help='the number of visualize images')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    torch.backends.cudnn.benchmark = True
    # BUGFIX: strip whitespace like linear_eval.py does; the original
    # .strip('-') stripped hyphens, which a CUDA device list never needs.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip()  # set vis gpu

    if args.SimSiam:
        trainer = SimSiamTrainer(args)
    else:
        trainer = CoTrainer(args)
    trainer.setup()
    trainer.train()
/train_val_split.py
import os from glob import glob import numpy as np import argparse def parse_args(): parser = argparse.ArgumentParser(description='Test ') parser.add_argument('--data-dir', default='/mnt/hdd02/shibuya_scramble', help='original data directory') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() ## Random Train-Val split im_list = sorted(glob(os.path.join(args.data_dir, '*.jpg'))) im_list = [im_name for im_name in im_list] tr_im_list = list(np.random.choice(im_list, size=int(len(im_list)*0.8), replace=False)) vl_im_list = list(set(im_list) - set(tr_im_list)) for phase in ['train', 'val']: with open(os.path.join(args.data_dir, './{}.txt'.format(phase)), mode='w') as f: if phase == 'train': f.write('\n'.join(tr_im_list)) elif phase == 'val': f.write('\n'.join(vl_im_list))
/utils/contrastive_trainer.py
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets

from models.siamese_net import SiameseNetwork
from models.l2_contrastive_loss import L2ContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, EmbeddingDisplayer
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10


class CoTrainer(Trainer):
    """Trains a siamese network with an L2 contrastive loss on pos/neg pairs."""

    def setup(self):
        """initialize the datasets, model, loss and optimizer"""
        args = self.args
        self.vis = ImageDisplayer(args, self.save_dir)
        self.emb = EmbeddingDisplayer(args, self.save_dir)

        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")

        if args.cifar10:
            # Download and create datasets
            or_train = datasets.CIFAR10(root="CIFAR10_Dataset", train=True,
                                        transform=None, download=True)
            or_val = datasets.CIFAR10(root="CIFAR10_Dataset", train=False,
                                      transform=None, download=True)
            # splits CIFAR10 into two streams (pos/neg pair sampling)
            self.datasets = {x: PosNegCifar10((or_train if x == 'train' else or_val),
                                              phase=x)
                             for x in ['train', 'val']}
        else:
            # NOTE(review): train.py defines --div-row/--div-col but no
            # --div-num; confirm SpatialDataset's expected arguments.
            self.datasets = {x: SpatialDataset(os.path.join(args.data_dir, x),
                                               args.crop_size,
                                               args.div_num,
                                               args.aug)
                             for x in ['train', 'val']}

        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          batch_size=args.batch_size,
                                          shuffle=(True if x == 'train' else False),
                                          num_workers=args.num_workers * self.device_count,
                                          pin_memory=(True if x == 'train' else False),
                                          worker_init_fn=worker_init_fn)
                            for x in ['train', 'val']}

        # Define model, loss, optim
        self.model = SiameseNetwork(models.__dict__[args.arch],
                                    pattern_feature=args.pattern_feature)
        self.model.to(self.device)
        self.criterion = L2ContrastiveLoss(args.margin)
        self.criterion.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr,
                                   momentum=args.momentum,
                                   weight_decay=args.weight_decay)
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer,
                                                  milestones=[80, 120, 160, 200, 250],
                                                  gamma=0.1)

        self.start_epoch = 0
        self.best_loss = np.inf
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume, self.device))
        self.save_list = Save_Handle(max_num=args.max_model_num)

    def train(self):
        """training process"""
        args = self.args
        for epoch in range(self.start_epoch, args.max_epoch):
            logging.info('-' * 5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-' * 5)
            self.epoch = epoch
            self.train_epoch(epoch)
            if epoch % args.val_epoch == 0 and epoch >= args.val_start:
                self.val_epoch(epoch)

    def train_epoch(self, epoch):
        """One optimisation pass over the training set; saves a checkpoint."""
        epoch_loss = AverageMeter()
        epoch_start = time.time()
        self.model.train()  # Set model to training mode

        for step, (input1, input2, target, label) in enumerate(self.dataloaders['train']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            target = target.to(self.device)

            with torch.set_grad_enabled(True):
                output1, output2 = self.model(input1, input2)
                loss = self.criterion(output1, output2, target)
                epoch_loss.update(loss.item(), input1.size(0))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            # visualize the first batch only
            if step == 0:
                self.vis(epoch, 'train', input1, input2, target)
                self.emb(output1, label, epoch, 'train')

        # BUGFIX: MultiStepLR milestones [80, 120, ...] are epochs; step once
        # per epoch -- the original stepped per batch, decaying the LR far
        # too early.
        self.scheduler.step()

        logging.info('Epoch {} Train, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time() - epoch_start))

        model_state_dic = self.model.state_dict()
        save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
        torch.save({
            'epoch': self.epoch,
            'optimizer_state_dict': self.optimizer.state_dict(),
            'model_state_dict': model_state_dic
        }, save_path)
        self.save_list.append(save_path)  # control the number of saved models

    def val_epoch(self, epoch):
        """One evaluation pass; keeps the best (lowest-loss) model on disk."""
        epoch_start = time.time()
        self.model.eval()  # Set model to evaluate mode
        epoch_loss = AverageMeter()

        for step, (input1, input2, target, label) in enumerate(self.dataloaders['val']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            target = target.to(self.device)

            with torch.set_grad_enabled(False):
                output1, output2 = self.model(input1, input2)
                loss = self.criterion(output1, output2, target)
                epoch_loss.update(loss.item(), input1.size(0))

            # visualize the first batch only
            if step == 0:
                self.vis(epoch, 'val', input1, input2, target)
                self.emb(output1, label, epoch, 'val')

        logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time() - epoch_start))

        model_state_dic = self.model.state_dict()
        if self.best_loss > epoch_loss.get_avg():
            self.best_loss = epoch_loss.get_avg()
            logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
            torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
/utils/helper.py
import os import numpy as np import torch def worker_init_fn(worker_id): np.random.seed(np.random.get_state()[1][0] + worker_id) class Save_Handle(object): """handle the number of """ def __init__(self, max_num): self.save_list = [] self.max_num = max_num def append(self, save_path): if len(self.save_list) < self.max_num: self.save_list.append(save_path) else: remove_path = self.save_list[0] del self.save_list[0] self.save_list.append(save_path) if os.path.exists(remove_path): os.remove(remove_path) class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = 1.0 * self.sum / self.count def get_avg(self): return self.avg def get_count(self): return self.count ## cannot use in training @torch.no_grad() def accuracy(meter, output1, output2, target): """Computes the accuracy overthe predictions""" for logit in [output1, output2]: corrects = (torch.max(logit, 1)[1].data == target.squeeze().long().data).sum() accu = float(corrects) / float(target.size()[0]) meter.update(accu) return meter
/utils/simsiam_trainer.py
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets

from models.simple_siamese_net import SiameseNetwork
from models.cosine_contrastive_loss import CosineContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, LossGraphPloter
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10, get_simsiam_dataset


class SimSiamTrainer(Trainer):
    """SimSiam training loop: cosine LR schedule, symmetric negative-cosine loss."""

    def setup(self):
        """initialize the datasets, model, loss and optimizer"""
        args = self.args
        self.vis = ImageDisplayer(args, self.save_dir)
        self.tr_graph = LossGraphPloter(self.save_dir)
        self.vl_graph = LossGraphPloter(self.save_dir)

        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")

        if args.cifar10:
            self.datasets = {x: get_simsiam_dataset(args, x) for x in ['train', 'val']}
        else:
            self.datasets = {x: SpatialDataset(x,
                                               args.data_dir,
                                               args.crop_size,
                                               (args.div_row, args.div_col),
                                               args.aug)
                             for x in ['train', 'val']}

        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          batch_size=args.batch_size,
                                          shuffle=(True if x == 'train' else False),
                                          num_workers=args.num_workers * self.device_count,
                                          pin_memory=(True if x == 'train' else False),
                                          worker_init_fn=worker_init_fn)
                            for x in ['train', 'val']}

        # Define model, loss, optim
        self.model = SiameseNetwork(args)
        self.model.to(self.device)
        self.criterion = CosineContrastiveLoss()
        self.criterion.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr,
                                   momentum=args.momentum,
                                   weight_decay=args.weight_decay)
        self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer,
                                                        T_max=args.max_epoch)

        self.start_epoch = 0
        self.best_loss = np.inf
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                self.model.load_state_dict(torch.load(args.resume, self.device))
        self.save_list = Save_Handle(max_num=args.max_model_num)

    def train(self):
        """training process"""
        args = self.args
        for epoch in range(self.start_epoch, args.max_epoch):
            logging.info('-' * 5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-' * 5)
            self.epoch = epoch
            self.train_epoch(epoch)
            if epoch % args.val_epoch == 0 and epoch >= args.val_start:
                self.val_epoch(epoch)

    def train_epoch(self, epoch):
        """One optimisation pass; periodically checkpoints per --check_point."""
        epoch_loss = AverageMeter()
        epoch_start = time.time()
        self.model.train()  # Set model to training mode

        for step, ((input1, input2), label) in enumerate(self.dataloaders['train']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)

            with torch.set_grad_enabled(True):
                (z1, z2), (p1, p2) = self.model(input1, input2)
                loss = self.criterion(z1, z2, p1, p2)
                epoch_loss.update(loss.item(), input1.size(0))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

            # visualize the first batch only
            if step == 0:
                self.vis(epoch, 'train', input1, input2, label)

        # BUGFIX: CosineAnnealingLR is configured with T_max=max_epoch (an
        # epoch count), so step once per epoch -- the original stepped per
        # batch, completing the cosine cycle within the first epochs.
        self.scheduler.step()

        logging.info('Epoch {} Train, Loss: {:.5f}, lr: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(),
                             self.optimizer.param_groups[0]['lr'],
                             time.time() - epoch_start))
        self.tr_graph(self.epoch, epoch_loss.get_avg(), 'tr')

        if epoch % self.args.check_point == 0:
            model_state_dic = self.model.state_dict()
            save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
            torch.save({
                'epoch': self.epoch,
                'optimizer_state_dict': self.optimizer.state_dict(),
                'model_state_dict': model_state_dic
            }, save_path)
            self.save_list.append(save_path)  # control the number of saved models

    def val_epoch(self, epoch):
        """One evaluation pass; keeps the best (lowest-loss) model on disk."""
        epoch_start = time.time()
        self.model.eval()  # Set model to evaluate mode
        epoch_loss = AverageMeter()

        for step, ((input1, input2), label) in enumerate(self.dataloaders['val']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)

            with torch.set_grad_enabled(False):
                (z1, z2), (p1, p2) = self.model(input1, input2)
                loss = self.criterion(z1, z2, p1, p2)
                epoch_loss.update(loss.item(), input1.size(0))

            # visualize the first batch only
            if step == 0:
                self.vis(epoch, 'val', input1, input2, label)

        logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time() - epoch_start))
        self.vl_graph(self.epoch, epoch_loss.get_avg(), 'vl')

        model_state_dic = self.model.state_dict()
        if self.best_loss > epoch_loss.get_avg():
            self.best_loss = epoch_loss.get_avg()
            logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
            torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
/utils/visualizer.py
import os
import numpy as np
from PIL import Image
import torch
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are saved to disk, never shown
from matplotlib import pyplot as plt

### Takes a batch of torch tensors and draws it (according to args.div_num)

# ImageNet normalisation constants; invnorm() undoes them for display.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])

def invnorm(img, N):
    # Pick sample N from the batch, move to CPU / HWC layout, undo normalisation.
    img = img[N,:,:,:].to('cpu').detach().numpy().copy()
    img = img.transpose(1,2,0)
    img = img*std+mean
    return img

class ImageDisplayer:
    """Saves a side-by-side grid of input image pairs to <save_dir>/images/."""

    def __init__(self, args, save_fir):
        # N is number of batch to display
        self.args = args
        self.save_dir = save_fir
        self.N = args.visual_num

    @torch.no_grad()
    def __call__(self, epoch, prefix, img1, img2, target):
        """De-normalise the first N pairs (plus optional targets) and render them."""
        imgs1 = []
        imgs2 = []
        targets = []
        for n in range(self.N):
            imgs1.append(invnorm(img1,n))
            imgs2.append(invnorm(img2,n))
            if target is not None:
                targets.append(target[n].item())
            else:
                targets = None
        self.display_images(epoch, prefix, imgs1, imgs2, targets)

    def display_images(self, epoch, prefix, images1: [Image], images2: [Image], targets,
                       columns=2, width=8, height=8, label_wrap_length=50, label_font_size=8):
        """Draw each (im1, im2) pair on one row; the target (if any) becomes the title."""
        if not (images1 and images2):
            print("No images to display.")
            return

        height = max(height, int(len(images1)/columns) * height)
        plt.figure(figsize=(width, height))
        i = 1
        if targets is not None:
            for (im1, im2, tar) in zip(images1, images2, targets):
                # invnorm() output is float (approximately [0, 1]); rescale to 8-bit for PIL
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im2)
                i += 1
        else:
            for (im1, im2) in zip(images1, images2):
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.imshow(im2)
                i += 1
        plt.tight_layout()
        output_img_name = 'imgs_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()

class EmbeddingDisplayer:
    """Scatter-plots embeddings coloured by CIFAR-10 class label."""

    def __init__(self, args, save_fir):
        self.args = args
        self.save_dir = save_fir
        self.cifar10_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        self.colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
                       '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']

    @torch.no_grad()
    def __call__(self, embeddings, targets, epoch, prefix, xlim=None, ylim=None):
        # NOTE(review): only embedding dims 0 and 1 are plotted -- assumes the
        # embeddings are 2-d (or that the first two dims are meaningful).
        embeddings = embeddings.to('cpu').detach().numpy().copy()
        targets = targets.to('cpu').detach().numpy().copy()
        plt.figure(figsize=(10,10))
        for i in range(10):
            inds = np.where(targets==i)[0]
            plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=self.colors[i])
        if xlim:
            plt.xlim(xlim[0], xlim[1])
        if ylim:
            plt.ylim(ylim[0], ylim[1])
        plt.legend(self.cifar10_classes)
        output_img_name = 'emb_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()

class LossGraphPloter:
    """Accumulates one loss value per call and re-saves the loss curve as SVG."""

    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.epochs = []
        self.losses = []

    def __call__(self, epoch, loss, prefix):
        self.epochs.append(epoch)
        self.losses.append(loss)
        output_img_name = '{}_loss.svg'.format(prefix)
        plt.plot(self.epochs, self.losses)
        plt.title('Loss')
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()

class AccLossGraphPloter:
    """Tracks train/val accuracy and loss per epoch; saves a two-panel SVG."""

    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.tr_accs = []
        self.vl_accs = []
        self.tr_losses = []
        self.vl_losses = []
        self.epochs = []

    def __call__(self, epoch, tr_acc, vl_acc, tr_loss, vl_loss, prefix):
        self.tr_accs.append(tr_acc)
        self.vl_accs.append(vl_acc)
        self.tr_losses.append(tr_loss)
        self.vl_losses.append(vl_loss)
        self.epochs.append(epoch)
        output_img_name = '{}_eval.svg'.format(prefix)

        # left panel: accuracy curves; right panel: loss curves
        fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))
        axL.plot(self.epochs, self.tr_accs, label='train')
        axL.plot(self.epochs, self.vl_accs, label='val')
        axL.set_title('Top-1 Accuracy')
        axL.set_xlabel('epoch')
        axL.set_ylabel('acc [%]')
        axL.legend(loc="lower right")

        axR.plot(self.epochs, self.tr_losses, label='train')
        axR.plot(self.epochs, self.vl_losses, label='val')
        axR.set_title('Loss')
        axR.set_xlabel('epoch')
        axR.set_ylabel('loss')
        axR.legend(loc="upper right")

        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Peroxidess/Ablation-Time-Prediction-Model
refs/heads/main
{"/Regression/src/eval.py": ["/Regression/src/model/history_.py"], "/Regression/src/preprocess/plot_tabel.py": ["/Regression/src/model/history_.py"], "/Regression/src/main.py": ["/Regression/src/model/history_.py", "/Regression/src/model/training_.py", "/Regression/src/preprocess/get_dataset.py", "/Regression/src/preprocess/load_data.py"], "/Regression/src/learn_weight_main.py": ["/Regression/src/model/training_.py", "/Regression/src/preprocess/get_dataset.py", "/Regression/src/learn_rewieght/reweight.py", "/Regression/src/preprocess/load_data.py"], "/Regression/src/model/training_.py": ["/Regression/src/model/evaluate.py", "/Regression/src/model/bulid_model.py"], "/Regression/src/useless/ave_logsit_baseline.py": ["/Regression/src/preprocess/get_dataset.py", "/Regression/src/preprocess/load_data.py"], "/Regression/src/useless/keras_att.py": ["/Regression/src/preprocess/get_dataset.py", "/Regression/src/preprocess/load_data.py"]}
└── └── Regression └── src ├── eval.py ├── learn_rewieght │ ├── mnist_train.py │ └── reweight.py ├── learn_weight_main.py ├── main.py ├── model │ ├── bulid_model.py │ ├── evaluate.py │ ├── history_.py │ └── training_.py ├── preprocess │ ├── get_dataset.py │ ├── load_data.py │ └── plot_tabel.py └── useless ├── ave_logsit_baseline.py └── keras_att.py
/Regression/src/eval.py
from model.history_ import plot_metric_df import pandas as pd import matplotlib.pyplot as plt import os xx = os.getcwd() path_root = '../report/result/' task_name = 'ablation_time_all' metric_list = [] metric_list_dir = ['metric_ablation_time_enh_10nrun_1Fold.csv', 'metric_ablation_time_vanilla_10nrun_1Fold.csv', 'metric_gbm_10nrun_1Fold.csv', 'metric_lr_10nrun_1Fold.csv', ] for metric_dir in metric_list_dir: dir = path_root + metric_dir metric_df = pd.read_csv(dir) metric_list.append(metric_df) plot_metric_df(metric_list, task_name, val_flag='val_') plt.show() pass
/Regression/src/learn_rewieght/mnist_train.py
# Copyright (c) 2017 - 2019 Uber Technologies, Inc. # # Licensed under the Uber Non-Commercial License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at the root directory of this project. # # See the License for the specific language governing permissions and # limitations under the License. # # # # Runs MNIST experitment. Default 10 runs for 10 random seeds. # # Usage: # python -m mnist.imblanace_mnist_train_ad.py # # Flags: # --exp [string] Experiment name, `ours`, `hm`, `ratio`, `random` or `baseline`. # --pos_ratio [float] The ratio for the positive class, choose between 0.9 - 0.995. # --nrun [int] Total number of runs with different random seeds. # --ntrain [int] Number of training examples. # --nval [int] Number of validation examples. # --ntest [int] Number of test examples. # --tensorboard Writes TensorBoard logs while training, default True. # --notensorboard Disable TensorBoard. # --verbose Print training progress, default False. # --noverbose Disable printing. 
# from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import os import six import tensorflow as tf from collections import namedtuple from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet from tensorflow.examples.tutorials.mnist import input_data from tqdm import tqdm from mnist_.reweight import get_model, reweight_random, reweight_autodiff, reweight_hard_mining from utils.logger import get as get_logger os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' tf.logging.set_verbosity(tf.logging.ERROR) flags = tf.flags flags.DEFINE_float('pos_ratio', 0.995, 'Ratio of positive examples in training') flags.DEFINE_integer('nrun', 10, 'Number of runs') flags.DEFINE_integer('ntest', 500, 'Number of testing examples') flags.DEFINE_integer('ntrain', 5000, 'Number of training examples') flags.DEFINE_integer('nval', 10, 'Number of validation examples') flags.DEFINE_bool('verbose', False, 'Whether to print training progress') flags.DEFINE_bool('tensorboard', True, 'Whether to save training progress') flags.DEFINE_string('exp', 'baseline', 'Which experiment to run') FLAGS = tf.flags.FLAGS log = get_logger() Config = namedtuple('Config', [ 'reweight', 'lr', 'num_steps', 'random', 'ratio_weighted', 'nval', 'hard_mining', 'bsize' ]) exp_repo = dict() def RegisterExp(name): def _decorator(f): exp_repo[name] = f return f return _decorator LR = 0.001 NUM_STEPS = 4000 @RegisterExp('baseline') def baseline_config(): return Config( reweight=False, num_steps=NUM_STEPS * 2, lr=LR, random=False, ratio_weighted=False, hard_mining=False, bsize=100, nval=0) @RegisterExp('hm') def baseline_config(): return Config( reweight=False, num_steps=NUM_STEPS * 2, lr=LR, random=False, ratio_weighted=False, hard_mining=True, bsize=500, nval=0) @RegisterExp('ratio') def ratio_config(): return Config( reweight=False, num_steps=NUM_STEPS * 2, lr=LR, random=False, ratio_weighted=True, hard_mining=False, bsize=100, nval=0) @RegisterExp('random') def 
dpfish_config(): return Config( reweight=True, num_steps=NUM_STEPS * 2, lr=LR, random=True, ratio_weighted=False, hard_mining=False, bsize=100, nval=0) @RegisterExp('ours') def ours_config(): return Config( reweight=True, num_steps=NUM_STEPS, lr=LR, random=False, ratio_weighted=False, hard_mining=False, bsize=100, nval=FLAGS.nval) def get_imbalance_dataset(mnist, pos_ratio=0.9, ntrain=5000, nval=10, ntest=500, seed=0, class_0=4, class_1=9): rnd = np.random.RandomState(seed) # In training, we have 10% 4 and 90% 9. # In testing, we have 50% 4 and 50% 9. ratio = 1 - pos_ratio ratio_test = 0.5 x_train = mnist.train.images y_train = mnist.train.labels x_test = mnist.test.images y_test = mnist.test.labels x_train_0 = x_train[y_train == class_0] x_test_0 = x_test[y_test == class_0] # First shuffle, negative. idx = np.arange(x_train_0.shape[0]) rnd.shuffle(idx) x_train_0 = x_train_0[idx] nval_small_neg = int(np.floor(nval * ratio_test)) ntrain_small_neg = int(np.floor(ntrain * ratio)) - nval_small_neg x_val_0 = x_train_0[:nval_small_neg] # 450 4 in validation. x_train_0 = x_train_0[nval_small_neg:nval_small_neg + ntrain_small_neg] # 500 4 in training. if FLAGS.verbose: print('Number of train negative classes', ntrain_small_neg) print('Number of val negative classes', nval_small_neg) idx = np.arange(x_test_0.shape[0]) rnd.shuffle(idx) x_test_0 = x_test_0[:int(np.floor(ntest * ratio_test))] # 450 4 in testing. x_train_1 = x_train[y_train == class_1] x_test_1 = x_test[y_test == class_1] # First shuffle, positive. idx = np.arange(x_train_1.shape[0]) rnd.shuffle(idx) x_train_1 = x_train_1[idx] nvalsmall_pos = int(np.floor(nval * (1 - ratio_test))) ntrainsmall_pos = int(np.floor(ntrain * (1 - ratio))) - nvalsmall_pos x_val_1 = x_train_1[:nvalsmall_pos] # 50 9 in validation. x_train_1 = x_train_1[nvalsmall_pos:nvalsmall_pos + ntrainsmall_pos] # 4500 9 in training. 
idx = np.arange(x_test_1.shape[0]) rnd.shuffle(idx) x_test_1 = x_test_1[idx] x_test_1 = x_test_1[:int(np.floor(ntest * (1 - ratio_test)))] # 500 9 in testing. if FLAGS.verbose: print('Number of train positive classes', ntrainsmall_pos) print('Number of val positive classes', nvalsmall_pos) y_train_subset = np.concatenate([np.zeros([x_train_0.shape[0]]), np.ones([x_train_1.shape[0]])]) y_val_subset = np.concatenate([np.zeros([x_val_0.shape[0]]), np.ones([x_val_1.shape[0]])]) y_test_subset = np.concatenate([np.zeros([x_test_0.shape[0]]), np.ones([x_test_1.shape[0]])]) y_train_pos_subset = np.ones([x_train_1.shape[0]]) y_train_neg_subset = np.zeros([x_train_0.shape[0]]) x_train_subset = np.concatenate([x_train_0, x_train_1], axis=0).reshape([-1, 28, 28, 1]) x_val_subset = np.concatenate([x_val_0, x_val_1], axis=0).reshape([-1, 28, 28, 1]) x_test_subset = np.concatenate([x_test_0, x_test_1], axis=0).reshape([-1, 28, 28, 1]) x_train_pos_subset = x_train_1.reshape([-1, 28, 28, 1]) x_train_neg_subset = x_train_0.reshape([-1, 28, 28, 1]) # Final shuffle. 
idx = np.arange(x_train_subset.shape[0]) rnd.shuffle(idx) x_train_subset = x_train_subset[idx] y_train_subset = y_train_subset[idx] idx = np.arange(x_val_subset.shape[0]) rnd.shuffle(idx) x_val_subset = x_val_subset[idx] y_val_subset = y_val_subset[idx] idx = np.arange(x_test_subset.shape[0]) rnd.shuffle(idx) x_test_subset = x_test_subset[idx] y_test_subset = y_test_subset[idx] train_set = DataSet(x_train_subset * 255.0, y_train_subset) train_pos_set = DataSet(x_train_pos_subset * 255.0, y_train_pos_subset) train_neg_set = DataSet(x_train_neg_subset * 255.0, y_train_neg_subset) val_set = DataSet(x_val_subset * 255.0, y_val_subset) test_set = DataSet(x_test_subset * 255.0, y_test_subset) return train_set, val_set, test_set, train_pos_set, train_neg_set def get_exp_logger(sess, log_folder): """Gets a TensorBoard logger.""" with tf.name_scope('Summary'): writer = tf.summary.FileWriter(os.path.join(log_folder), sess.graph) class ExperimentLogger(): def log(self, niter, name, value): summary = tf.Summary() summary.value.add(tag=name, simple_value=value) writer.add_summary(summary, niter) def flush(self): """Flushes results to disk.""" writer.flush() return ExperimentLogger() def evaluate(sess, x_, y_, acc_, train_set, test_set): # Calculate final results. 
acc_sum = 0.0 acc_test_sum = 0.0 train_bsize = 100 for step in six.moves.xrange(5000 // train_bsize): x, y = train_set.next_batch(train_bsize) acc = sess.run(acc_, feed_dict={x_: x, y_: y}) acc_sum += acc test_bsize = 100 for step in six.moves.xrange(500 // test_bsize): x_test, y_test = test_set.next_batch(test_bsize) acc = sess.run(acc_, feed_dict={x_: x_test, y_: y_test}) acc_test_sum += acc train_acc = acc_sum / float(5000 // train_bsize) test_acc = acc_test_sum / float(500 // test_bsize) return train_acc, test_acc def get_acc(logits, y): prediction = tf.cast(tf.sigmoid(logits) > 0.5, tf.float32) return tf.reduce_mean(tf.cast(tf.equal(prediction, y), tf.float32)) def run(dataset, exp_name, seed, verbose=True): pos_ratio = FLAGS.pos_ratio ntrain = FLAGS.ntrain nval = FLAGS.nval ntest = FLAGS.ntest folder = os.path.join('ckpt_mnist_imbalance_cnn_p{:d}'.format(int(FLAGS.pos_ratio * 100.0)), exp_name + '_{:d}'.format(seed)) if not os.path.exists(folder): os.makedirs(folder) with tf.Graph().as_default(), tf.Session() as sess: config = exp_repo[exp_name]() bsize = config.bsize train_set, val_set, test_set, train_pos_set, train_neg_set = get_imbalance_dataset( dataset, pos_ratio=pos_ratio, ntrain=ntrain, nval=config.nval, ntest=ntest, seed=seed) # if config.nval == 0: # val_set = BalancedDataSet(train_pos_set, train_neg_set) x_ = tf.placeholder(tf.float32, [None, 784], name='x') y_ = tf.placeholder(tf.float32, [None], name='y') x_val_ = tf.placeholder(tf.float32, [None, 784], name='x_val') y_val_ = tf.placeholder(tf.float32, [None], name='y_val') ex_wts_ = tf.placeholder(tf.float32, [None], name='ex_wts') lr_ = tf.placeholder(tf.float32, [], name='lr') # Build training model. with tf.name_scope('Train'): _, loss_c, logits_c = get_model( x_, y_, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=None) train_op = tf.train.MomentumOptimizer(config.lr, 0.9).minimize(loss_c) # Build evaluation model. 
with tf.name_scope('Val'): _, loss_eval, logits_eval = get_model( x_, y_, is_training=False, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=True) acc_ = get_acc(logits_eval, y_) # Build reweighting model. if config.reweight: if config.random: ex_weights_ = reweight_random(bsize) else: ex_weights_ = reweight_autodiff( x_, y_, x_val_, y_val_, bsize, min(bsize, nval), eps=0.0, gate_gradients=1) else: if config.hard_mining: ex_weights_ = reweight_hard_mining(x_, y_, positive=True) else: if config.ratio_weighted: # Weighted by the ratio of each class. ex_weights_ = pos_ratio * (1 - y_) + (1 - pos_ratio) * (y_) else: # Weighted by uniform. ex_weights_ = tf.ones([bsize], dtype=tf.float32) / float(bsize) if FLAGS.tensorboard: exp_logger = get_exp_logger(sess, folder) else: exp_logger = None lr = config.lr num_steps = config.num_steps acc_sum = 0.0 acc_test_sum = 0.0 loss_sum = 0.0 count = 0 sess.run(tf.global_variables_initializer()) for step in six.moves.xrange(num_steps): x, y = train_set.next_batch(bsize) x_val, y_val = val_set.next_batch(min(bsize, nval)) # Use 50% learning rate for the second half of training. if step > num_steps // 2: lr = config.lr / 2.0 else: lr = config.lr ex_weights = sess.run( ex_weights_, feed_dict={x_: x, y_: y, x_val_: x_val, y_val_: y_val}) loss, acc, _ = sess.run( [loss_c, acc_, train_op], feed_dict={ x_: x, y_: y, x_val_: x_val, y_val_: y_val, ex_wts_: ex_weights, lr_: lr }) if (step + 1) % 100 == 0: train_acc, test_acc = evaluate(sess, x_, y_, acc_, train_set, test_set) if verbose: print('Step', step + 1, 'Loss', loss, 'Train acc', train_acc, 'Test acc', test_acc) if FLAGS.tensorboard: exp_logger.log(step + 1, 'train acc', train_acc) exp_logger.log(step + 1, 'test acc', test_acc) exp_logger.flush() acc_sum = 0.0 loss_sum = 0.0 acc_test_sum = 0.0 count = 0 # Final evaluation. 
train_acc, test_acc = evaluate(sess, x_, y_, acc_, train_set, test_set) if verbose: print('Final', 'Train acc', train_acc, 'Test acc', test_acc) return train_acc, test_acc def run_many(dataset, exp_name): train_acc_list = [] test_acc_list = [] for trial in tqdm(six.moves.xrange(FLAGS.nrun), desc=exp_name): train_acc, test_acc = run( dataset, exp_name, (trial * 123456789) % 100000, verbose=FLAGS.verbose) train_acc_list.append(train_acc) test_acc_list.append(test_acc) train_acc_list = np.array(train_acc_list) test_acc_list = np.array(test_acc_list) print(exp_name, 'Train acc {:.3f}% ({:.3f}%)'.format(train_acc_list.mean() * 100.0, train_acc_list.std() * 100.0)) print(exp_name, 'Test acc {:.3f}% ({:.3f}%)'.format(test_acc_list.mean() * 100.0, test_acc_list.std() * 100.0)) def main(): mnist = input_data.read_data_sets("data/mnist", one_hot=False) for exp in FLAGS.exp.split(','): run_many(mnist, exp) if __name__ == '__main__': main()
/Regression/src/learn_rewieght/reweight.py
# Copyright (c) 2017 - 2019 Uber Technologies, Inc. # # Licensed under the Uber Non-Commercial License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at the root directory of this project. # # See the License for the specific language governing permissions and # limitations under the License. # # # # Models for MNIST experiments. # from __future__ import division, print_function import numpy as np import tensorflow as tf def get_model(inputs, labels, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=None, reuse=None, ): """ :param inputs: [Tensor] Inputs. :param labels: [Tensor] Labels. :param is_training: [bool] Whether in training mode, default True. :param dtype: [dtype] Data type, default tf.float32. :param w_dict: [dict] Dictionary of weights, default None. :param ex_wts: [Tensor] Example weights placeholder, default None. :param reuse: [bool] Whether to reuse variables, default None. """ if w_dict is None: w_dict = {} def _get_var(name, shape, dtype, initializer): key = tf.get_variable_scope().name + '/' + name if key in w_dict: return w_dict[key] else: var = tf.get_variable(name, shape, dtype, initializer=initializer) w_dict[key] = var return var with tf.variable_scope('Model', reuse=reuse): shape_list = np.append(np.array([-1]), np.squeeze(inputs.shape[1:].as_list())) shape_list_wts = np.append(np.array([-1]), np.squeeze(ex_wts.shape[1:].as_list())) shape_list_fir = np.append(np.squeeze(inputs.shape[1:].as_list()), np.array([1024])) shape_list_sec = np.array([1024, 256]) shape_list_thr = np.array([256, 64]) inputs_ = tf.cast(tf.reshape(inputs, shape_list), dtype) inputs_w = tf.cast(tf.reshape(ex_wts, shape_list_wts), dtype) # inputs_w = tf.matrix_diag(ex_wts) labels = tf.cast(tf.reshape(labels, [-1, 1]), dtype) w_init = tf.truncated_normal_initializer(stddev=0.1) w1 = _get_var('w1', shape_list_fir, dtype, initializer=w_init) w2 = _get_var('w2', shape_list_sec, dtype, 
initializer=w_init) w3 = _get_var('w3', shape_list_thr, dtype, initializer=w_init) w4 = _get_var('w4', [64, 32], dtype, initializer=w_init) w5 = _get_var('w5', [32, 1], dtype, initializer=w_init) b_init = tf.constant_initializer(0.0) b1 = _get_var('b1', 1, dtype, initializer=b_init) b2 = _get_var('b2', 1, dtype, initializer=b_init) b3 = _get_var('b3', 64, dtype, initializer=b_init) b4 = _get_var('b4', 32, dtype, initializer=b_init) b5 = _get_var('b5', 1, dtype, initializer=b_init) act = tf.nn.relu l0 = tf.identity(inputs_, name='l0') z1 = tf.add(tf.matmul(l0, w1), b1, name='z1') l1 = act(z1, name='l1') # h1 = tf.contrib.layers.batch_norm(l1, center=True, scale=True, is_training=True, scope='bn1') z2 = tf.add(tf.matmul(l1, w2), b2, name='z2') l2 = act(z2, name='l2') # h2 = tf.contrib.layers.batch_norm(l2, center=True, scale=True, is_training=True, scope='bn2') z3 = tf.add(tf.matmul(l2, w3), b3, name='z3') l3 = act(z3, name='l3') # h3 = tf.contrib.layers.batch_norm(l3, center=True, scale=True, is_training=True, scope='bn3') z4 = tf.add(tf.matmul(l3, w4), b4, name='z4') l4 = act(z4, name='l4') # h4 = tf.contrib.layers.batch_norm(l4, center=True, scale=True, is_training=True, scope='bn4') z5 = tf.add(tf.matmul(l4, w5), b5, name='z5') pred = z5 if ex_wts is None: # Average loss. loss = tf.reduce_mean(tf.square(tf.subtract(pred, labels))) else: # Weighted loss. squa = tf.square(tf.subtract(pred, labels)) * inputs_w mse = tf.nn.l2_loss(tf.subtract(pred, labels)) * inputs_w loss = tf.reduce_mean(squa) return w_dict, loss, pred def reweight_random(bsize, eps=0.0): """Reweight examples using random numbers. :param bsize: [int] Batch size. :param eps: [float] Minimum example weights, default 0.0. 
""" ex_weight = tf.random_normal([bsize], mean=0.0, stddev=1.0) ex_weight_plus = tf.maximum(ex_weight, eps) ex_weight_sum = tf.reduce_sum(ex_weight_plus) ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0)) ex_weight_norm = ex_weight_plus / ex_weight_sum return ex_weight_norm def reweight_autodiff(inp_a, label_a, inp_b, label_b, ex_wts_a, ex_wts_b, bsize_a, bsize_b, eps=0, gate_gradients=1): """Reweight examples using automatic differentiation. :param inp_a: [Tensor] Inputs for the noisy pass. :param label_a: [Tensor] Labels for the noisy pass. :param inp_b: [Tensor] Inputs for the clean pass. :param label_b: [Tensor] Labels for the clean pass. :param bsize_a: [int] Batch size for the noisy pass. :param bsize_b: [int] Batch size for the clean pass. :param eps: [float] Minimum example weights, default 0.0. :param gate_gradients: [int] Tensorflow gate gradients, reduce concurrency. """ # ex_wts_a = tf.ones([bsize_a], dtype=tf.float32) # ex_wts_b = tf.ones([bsize_b], dtype=tf.float32) / float(bsize_b) # ex_wts_b = tf.placeholder(tf.float32, [None, 1], name='ex_wts_b') w_dict, loss_a, logits_a = get_model( inp_a, label_a, ex_wts=ex_wts_a, is_training=True, reuse=True) var_names = w_dict.keys() var_list = [w_dict[kk] for kk in var_names] grads = tf.gradients(loss_a, var_list, gate_gradients=gate_gradients) # grads_w = tf.gradients(loss_a, [ex_wts_a], gate_gradients=gate_gradients) var_list_new = [vv - gg for gg, vv in zip(grads, var_list)] w_dict_new = dict(zip(var_names, var_list_new)) _, loss_b, logits_b = get_model( inp_b, label_b, ex_wts=ex_wts_b, is_training=True, reuse=True, w_dict=w_dict_new) grads_ex_wts = tf.gradients(loss_b, [ex_wts_a], gate_gradients=gate_gradients)[0] ex_weight = -grads_ex_wts ex_weight_plus = tf.maximum(ex_weight, eps) ex_weight_sum = tf.reduce_sum(ex_weight_plus) ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0)) ex_weight_norm = ex_weight_plus / ex_weight_sum return ex_weight_norm, var_list, grads, ex_weight_plus def 
reweight_hard_mining(inp, label, positive=False): """Reweight examples using hard mining. :param inp: [Tensor] [N, ...] Inputs. :param label: [Tensor] [N] Labels :param positive: [bool] Whether perform hard positive mining or hard negative mining. :return [Tensor] Examples weights of the same shape as the first dim of inp. """ _, loss, logits = get_model(inp, label, ex_wts=None, is_training=True, reuse=True) # Mine for positive if positive: loss_mask = loss * label else: loss_mask = loss * (1 - label) if positive: k = tf.cast(tf.reduce_sum(1 - label), tf.int32) else: k = tf.cast(tf.reduce_sum(label), tf.int32) k = tf.maximum(k, 1) loss_sorted, loss_sort_idx = tf.nn.top_k(loss_mask, k) if positive: mask = 1 - label else: mask = label updates = tf.ones([tf.shape(loss_sort_idx)[0]], dtype=label.dtype) mask_add = tf.scatter_nd(tf.expand_dims(loss_sort_idx, axis=1), updates, [tf.shape(inp)[0]]) mask = tf.maximum(mask, mask_add) mask_sum = tf.reduce_sum(mask) mask_sum += tf.cast(tf.equal(mask_sum, 0.0), tf.float32) mask = mask / mask_sum return mask
/Regression/src/learn_weight_main.py
# Copyright (c) 2017 - 2019 Uber Technologies, Inc. # # Licensed under the Uber Non-Commercial License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at the root directory of this project. # # See the License for the specific language governing permissions and # limitations under the License. # # # # Runs MNIST experitment. Default 10 runs for 10 random seeds. # # Usage: # python -m mnist.imblanace_mnist_train_ad.py # # Flags: # --exp [string] Experiment name, `ours`, `hm`, `ratio`, `random` or `baseline`. # --pos_ratio [float] The ratio for the positive class, choose between 0.9 - 0.995. # --nrun [int] Total number of runs with different random seeds. # --ntrain [int] Number of training examples. # --nval [int] Number of validation examples. # --ntest [int] Number of test examples. # --tensorboard Writes TensorBoard logs while training, default True. # --notensorboard Disable TensorBoard. # --verbose Print training progress, default False. # --noverbose Disable printing. 
# from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import pandas as pd import os import six import tensorflow as tf from collections import namedtuple from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet from tensorflow.examples.tutorials.mnist import input_data from tqdm import tqdm from learn_rewieght.reweight import get_model, reweight_random, reweight_autodiff, reweight_hard_mining from preprocess.load_data import load_data_ from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection from model.training_ import training_model, model_training, precision, recall, f1, r2 from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error from sklearn.model_selection import KFold import matplotlib.pyplot as plt os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' tf.logging.set_verbosity(tf.logging.ERROR) flags = tf.flags flags.DEFINE_float('pos_ratio', 0.995, 'Ratio of positive examples in training') flags.DEFINE_integer('nrun', 10, 'Number of runs') flags.DEFINE_integer('ntest', 500, 'Number of testing examples') flags.DEFINE_integer('ntrain', 5000, 'Number of training examples') flags.DEFINE_integer('nval', 10, 'Number of validation examples') flags.DEFINE_bool('verbose', False, 'Whether to print training progress') flags.DEFINE_bool('tensorboard', False, 'Whether to save training progress') flags.DEFINE_string('exp', 'baseline', 'Which experiment to run') FLAGS = tf.flags.FLAGS Config = namedtuple('Config', [ 'reweight', 'lr', 'num_steps', 'random', 'ratio_weighted', 'nval', 'hard_mining', 'bsize' ]) exp_repo = dict() def RegisterExp(name): def _decorator(f): exp_repo[name] = f return f return _decorator LR = 0.001 NUM_STEPS = 4000 @RegisterExp('baseline') def baseline_config(): return Config( reweight=False, num_steps=NUM_STEPS * 2, lr=LR, random=False, ratio_weighted=False, hard_mining=False, bsize=100, nval=0) @RegisterExp('hm') def baseline_config(): return 
Config( reweight=False, num_steps=NUM_STEPS * 2, lr=LR, random=False, ratio_weighted=False, hard_mining=True, bsize=500, nval=0) @RegisterExp('ratio') def ratio_config(): return Config( reweight=False, num_steps=NUM_STEPS * 2, lr=LR, random=False, ratio_weighted=True, hard_mining=False, bsize=100, nval=0) @RegisterExp('random') def dpfish_config(): return Config( reweight=True, num_steps=NUM_STEPS * 2, lr=LR, random=True, ratio_weighted=False, hard_mining=False, bsize=100, nval=0) @RegisterExp('ours') def ours_config(): return Config( reweight=True, num_steps=NUM_STEPS, lr=LR, random=False, ratio_weighted=False, hard_mining=False, bsize=100, nval=FLAGS.nval) def get_imbalance_dataset(mnist, pos_ratio=0.9, ntrain=5000, nval=10, ntest=500, seed=0, class_0=4, class_1=9): rnd = np.random.RandomState(seed) # In training, we have 10% 4 and 90% 9. # In testing, we have 50% 4 and 50% 9. ratio = 1 - pos_ratio ratio_test = 0.5 x_train = mnist.train.images y_train = mnist.train.labels x_test = mnist.test.images y_test = mnist.test.labels x_train_0 = x_train[y_train == class_0] x_test_0 = x_test[y_test == class_0] # First shuffle, negative. idx = np.arange(x_train_0.shape[0]) rnd.shuffle(idx) x_train_0 = x_train_0[idx] nval_small_neg = int(np.floor(nval * ratio_test)) ntrain_small_neg = int(np.floor(ntrain * ratio)) - nval_small_neg x_val_0 = x_train_0[:nval_small_neg] # 450 4 in validation. x_train_0 = x_train_0[nval_small_neg:nval_small_neg + ntrain_small_neg] # 500 4 in training. if FLAGS.verbose: print('Number of train negative classes', ntrain_small_neg) print('Number of val negative classes', nval_small_neg) idx = np.arange(x_test_0.shape[0]) rnd.shuffle(idx) x_test_0 = x_test_0[:int(np.floor(ntest * ratio_test))] # 450 4 in testing. x_train_1 = x_train[y_train == class_1] x_test_1 = x_test[y_test == class_1] # First shuffle, positive. 
idx = np.arange(x_train_1.shape[0]) rnd.shuffle(idx) x_train_1 = x_train_1[idx] nvalsmall_pos = int(np.floor(nval * (1 - ratio_test))) ntrainsmall_pos = int(np.floor(ntrain * (1 - ratio))) - nvalsmall_pos x_val_1 = x_train_1[:nvalsmall_pos] # 50 9 in validation. x_train_1 = x_train_1[nvalsmall_pos:nvalsmall_pos + ntrainsmall_pos] # 4500 9 in training. idx = np.arange(x_test_1.shape[0]) rnd.shuffle(idx) x_test_1 = x_test_1[idx] x_test_1 = x_test_1[:int(np.floor(ntest * (1 - ratio_test)))] # 500 9 in testing. if FLAGS.verbose: print('Number of train positive classes', ntrainsmall_pos) print('Number of val positive classes', nvalsmall_pos) y_train_subset = np.concatenate([np.zeros([x_train_0.shape[0]]), np.ones([x_train_1.shape[0]])]) y_val_subset = np.concatenate([np.zeros([x_val_0.shape[0]]), np.ones([x_val_1.shape[0]])]) y_test_subset = np.concatenate([np.zeros([x_test_0.shape[0]]), np.ones([x_test_1.shape[0]])]) y_train_pos_subset = np.ones([x_train_1.shape[0]]) y_train_neg_subset = np.zeros([x_train_0.shape[0]]) x_train_subset = np.concatenate([x_train_0, x_train_1], axis=0).reshape([-1, 28, 28, 1]) x_val_subset = np.concatenate([x_val_0, x_val_1], axis=0).reshape([-1, 28, 28, 1]) x_test_subset = np.concatenate([x_test_0, x_test_1], axis=0).reshape([-1, 28, 28, 1]) x_train_pos_subset = x_train_1.reshape([-1, 28, 28, 1]) x_train_neg_subset = x_train_0.reshape([-1, 28, 28, 1]) # Final shuffle. 
idx = np.arange(x_train_subset.shape[0]) rnd.shuffle(idx) x_train_subset = x_train_subset[idx] y_train_subset = y_train_subset[idx] idx = np.arange(x_val_subset.shape[0]) rnd.shuffle(idx) x_val_subset = x_val_subset[idx] y_val_subset = y_val_subset[idx] idx = np.arange(x_test_subset.shape[0]) rnd.shuffle(idx) x_test_subset = x_test_subset[idx] y_test_subset = y_test_subset[idx] train_set = DataSet(x_train_subset * 255.0, y_train_subset) train_pos_set = DataSet(x_train_pos_subset * 255.0, y_train_pos_subset) train_neg_set = DataSet(x_train_neg_subset * 255.0, y_train_neg_subset) val_set = DataSet(x_val_subset * 255.0, y_val_subset) test_set = DataSet(x_test_subset * 255.0, y_test_subset) return train_set, val_set, test_set, train_pos_set, train_neg_set def get_exp_logger(sess, log_folder): """Gets a TensorBoard logger.""" with tf.name_scope('Summary'): writer = tf.summary.FileWriter(os.path.join(log_folder), sess.graph) class ExperimentLogger(): def log(self, niter, name, value): summary = tf.Summary() summary.value.add(tag=name, simple_value=value) writer.add_summary(summary, niter) def flush(self): """Flushes results to disk.""" writer.flush() return ExperimentLogger() def evaluate(sess, x_, y_, acc_, x, y, x_test, y_test): # Calculate final results. 
train_acc = sess.run(acc_, feed_dict={x_: x, y_: y}) test_acc = sess.run(acc_, feed_dict={x_: x_test, y_: y_test}) return train_acc, test_acc def get_metric(pred, y): total_error = tf.reduce_sum(tf.square(tf.subtract(y, tf.reduce_mean(y)))) unexplained_error = tf.reduce_sum(tf.square(tf.subtract(y, pred))) R_squared = tf.reduce_mean(tf.subtract(1.0, tf.div(unexplained_error, total_error))) mse = tf.reduce_mean(tf.square(pred - y)) return mse def run(train_data, test_data, seed, task_name, target='label'): train_data, test_data, co_col, ca_col = data_preprocessing(train_data, test_data, ca_co_sel_flag=False, onehot_flag=True) _, test_data = anomaly_dectection(train_data, test_data) # train_data, test_data = anomaly_dectection(train_data, test_data)# Outlier detection x, y, x_val, y_val, test_set, test_set_label = \ get_dataset_(train_data, test_data, clean_ratio=clean_ratio, test_retio=test_ratio, seed=seed, val_ratio=val_ratio) # label confusion according to requirements x.reset_index(inplace=True) x.drop(columns=['基线-患者基本信息-ID_sparse'], inplace=True) y.reset_index(inplace=True) y_val = y.loc[y['sup_label'] == 0].sample(n=clean_data_num, random_state=seed) x_val = x.loc[y_val.index] x.drop(index=x_val.index, inplace=True) y.drop(index=x_val.index, inplace=True) ntrain = FLAGS.ntrain nval = FLAGS.nval ntest = FLAGS.ntest folder = os.path.join('ckpt_mnist_imbalance_cnn_p{:d}'.format(int(FLAGS.pos_ratio * 100.0)), task_name + '_{:d}'.format(seed)) if not os.path.exists(folder): os.makedirs(folder) with tf.Graph().as_default(), tf.Session() as sess: bsize = batchsize x_ = tf.placeholder(tf.float32, [None, x.shape[1]], name='x') y_ = tf.placeholder(tf.float32, [None], name='y') x_val_ = tf.placeholder(tf.float32, [None, x.shape[1]], name='x_val') y_val_ = tf.placeholder(tf.float32, [None], name='y_val') ex_wts_ = tf.placeholder(tf.float32, [None, 1], name='ex_wts') ex_wts_b = tf.placeholder(tf.float32, [None, 1], name='ex_wts_b') lr_ = tf.placeholder(tf.float32, [], 
name='lr') # Build training model. with tf.name_scope('Train'): _, loss_c, logits_c = get_model( x_, y_, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=None) train_op = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(loss_c) # metric_ = get_metric(logits_c, y_) # Build evaluation model. with tf.name_scope('Val'): _, loss_eval, logits_eval = get_model( x_, y_, is_training=False, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=True) metric_ = get_metric(logits_eval, y_) # Build reweighting model. if reweight: if random: ex_weights_ = reweight_random(bsize) else: ex_weights_, var_list_, grads_, grads_w_ = reweight_autodiff( x_, y_, x_val_, y_val_, ex_wts_, ex_wts_b, bsize, clean_data_num, eps=0.1, gate_gradients=1) else: if hard_mining: ex_weights_ = reweight_hard_mining(x_, y_, positive=True) else: if ratio_weighted: # Weighted by the ratio of each class. ex_weights_ = pos_ratio * (1 - y_) + (1 - pos_ratio) * (y_) else: # Weighted by uniform. ex_weights_ = tf.ones([bsize], dtype=tf.float32) / float(bsize) if FLAGS.tensorboard: exp_logger = get_exp_logger(sess, folder) else: exp_logger = None num_steps = 10 acc_sum = 0.0 acc_test_sum = 0.0 loss_sum = 0.0 count = 0 sess.run(tf.global_variables_initializer()) history = pd.DataFrame([]) history_loss = [] history_loss_acc = [] history_metric_r2 = [] history_metric_mse = [] history_metric_mae = [] for i in range(2000): kf = KFold(n_splits=2, shuffle=False, random_state=2020) # for k, (train_index, val_index) in enumerate(kf.split(x)): # x_batch, y_batch = x.iloc[train_index], y[target].iloc[train_index] x_batch, y_batch = x, y[target] ex_weights, var_list, grads, grads_w = sess.run( [ex_weights_, var_list_, grads_, grads_w_], feed_dict={x_: x_batch, y_: y_batch, x_val_: x_val, y_val_: y_val[target], ex_wts_: np.ones((batchsize, 1)), ex_wts_b: np.ones([clean_data_num, 1])}) # ww = var_list[0] # bb = var_list[1] # print(x_batch.shape) # print(ww.shape) # xx = np.matmul(np.array(x_batch), ww) 
# xxx = xx + bb # xxxx = xxx - np.array(y_batch).reshape(-1, 1) # ss = (xxxx ** 2) / 2 # sss = np.mean(ss) # ww_xx = xxxx.reshape(1, -1).dot(np.array(x_batch)) # re_xx = np.mean(np.abs(xxxx)) pred_tra, loss, acc, _ = sess.run( [logits_c, loss_c, metric_, train_op], feed_dict={ x_: x_batch, y_: y_batch, x_val_: x_val, y_val_: y_val[target], ex_wts_: ex_weights, lr_: lr }) print(np.unique(ex_weights)) pred = sess.run(logits_eval, feed_dict={x_: test_set, y_: test_set_label[target], ex_wts_: ex_weights}) r2 = r2_score(pred, test_set_label[target]) mse = mean_squared_error(pred, test_set_label[target]) mae = mean_absolute_error(pred, test_set_label[target]) history_loss.append(loss) history_loss_acc.append(acc) history_metric_r2.append(r2) history_metric_mse.append(mse) history_metric_mae.append(mae) # Final evaluation. history['loss'] = history_loss history['acc'] = history_loss_acc history['r2'] = history_metric_r2 history['mse'] = history_metric_mse history['mae'] = history_metric_mae pred_tra = sess.run(logits_eval, feed_dict={x_: x, y_: y[target], ex_wts_: ex_weights}) train_r2 = r2_score(pred_tra, y[target]) train_r2_ad = None train_mse = mean_squared_error(pred_tra, y[target]) train_mae = mean_absolute_error(pred_tra, y[target]) train_mape = None val_r2, val_r2_ad, val_mse, val_mae, val_mape, = None, None, None, None, None test_r2, test_r2_ad, test_mse, test_mae, test_mape = r2, None, mse, mae, None dict_ = dict(zip(['train_r2', 'train_r2_ad', 'train_mse', 'train_mae', 'train_mape', 'val_r2', 'val_r2_ad', 'val_mse', 'val_mae', 'val_mape', 'test_r2', 'test_r2_ad', 'test_mse', 'test_mae', 'test_mape'], [train_r2, train_r2_ad, train_mse, train_mae, train_mape, val_r2, val_r2_ad, val_mse, val_mae, val_mape, test_r2, test_r2_ad, test_mse, test_mae, test_mape, ])) metric_df = pd.DataFrame.from_dict([dict_]) return metric_df, pd.DataFrame([]), pd.DataFrame([]) def main(): metric_df_all = pd.DataFrame([]) test_prediction_all = pd.DataFrame([]) # for prediction of test 
data history_df_all = pd.DataFrame([]) # for keras model for i, trial in enumerate(tqdm(six.moves.xrange(FLAGS.nrun))): print('rnum : {}'.format(i)) seed = (trial * 2718) % 2020 # a different random seed for each run train_data, test_data = load_data_(datasets_name, task_name) metric_df, test_prediction, history_df = run(train_data, test_data, seed, task_name) metric_df_all = pd.concat([metric_df_all, metric_df], axis=0) test_prediction_all = pd.concat([test_prediction_all, test_prediction], axis=1) history_df_all = pd.concat([history_df_all, history_df], axis=1) for col in metric_df_all.columns: print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df_all[col].mean(), metric_df_all[col].std(), metric_df_all[col].max(), metric_df_all[col].median(), metric_df_all[col].min())) metric_df_all.to_csv('./metric_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False) history_df_all.to_csv('./history_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False) # test_prediction_all.columns = ['ab_time', 'ab_time_enh'] test_prediction_all.to_csv('./prediction{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits)) plt.show() pass np.random.seed(2020) datasets_name = 'LiverAblation' task_name = 'ablation_time_learn_weight' # ablation_time_enh / ablation_time_vanilla / relapse_risk nrun = 10 # num of repeated experiments clean_ratio = 1 # 1 for No label confusion test_ratio = 0 # test data ratio for label confusion val_ratio = 0 # val data ratio for label confusion n_splits = 1 # n_splits > 1 for Kfold cross validation / n_splits==1 for training all data epoch = 5000 # Kfold cross validation: a large number / training all data: mean epoch batchsize = 348 lr = 1e-4 clean_data_num = 10 reweight = True num_steps = NUM_STEPS random = False ratio_weighted = False hard_mining = False if __name__ == '__main__': main()
/Regression/src/main.py
import numpy as np
import pandas as pd
import six
from tqdm import tqdm
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
from model.training_ import training_model, model_training, precision, recall, f1, r2
from model.history_ import plot_history_df


def run(train_data, test_data, seed, task_name, target='label'):
    """Run one experiment: preprocess, (optionally) K-fold train, evaluate.

    :param train_data: [DataFrame] Raw training data.
    :param test_data: [DataFrame] Raw test data.
    :param seed: [int] Random seed for this run.
    :param task_name: [str] Selects the model branch inside training_model.
    :param target: [str] Name of the label column.
    :return: (metric_df, test_prediction, history_df) DataFrames.

    Relies on module-level config: clean_ratio, test_ratio, val_ratio,
    n_splits, epoch, batchsize, iter_, step_, nrun.
    """
    train_data, test_data, co_col, ca_col, nor = data_preprocessing(train_data, test_data,
                                                                    ca_co_sel_flag=False, onehot_flag=True)
    _, test_data = anomaly_dectection(train_data, test_data)
    # train_data, test_data = anomaly_dectection(train_data, test_data)# Outlier detection
    # Split/confuse labels according to the configured ratios.
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(nor, train_data, test_data, clean_ratio=clean_ratio, test_retio=test_ratio,
                     seed=seed, val_ratio=val_ratio,)  # label confusion according to requirements
    metric_df = pd.DataFrame([])
    test_prediction = pd.DataFrame([])
    history_df = pd.DataFrame([])
    history_list = []
    epoch_len_list = []
    if n_splits > 1:
        # NOTE(review): sklearn raises/warns when random_state is set with
        # shuffle=False — confirm the intended shuffling behaviour.
        kf = KFold(n_splits=n_splits, shuffle=False, random_state=seed)
        for k, (train_index, val_index) in enumerate(kf.split(train_set_mix)):
            print('KFlod in : {}'.format(k))
            model_, history_, metric_, test_pred_, epoch_len = training_model(
                train_set_mix, train_set_mix_label, task_name, train_index, val_index,
                test_set, test_set_label, epoch, batchsize, iter_, step_, target, seed)
            metric_df = pd.concat([metric_df, metric_], axis=0)
            history_df = pd.concat([history_df, history_], axis=1)
            history_list.append(history_)
            test_prediction = pd.concat([test_prediction, pd.DataFrame(test_pred_)], axis=1)
            epoch_len_list.append(epoch_len)
        plot_history_df(history_list, task_name)
        print('epoch_len_mean', np.mean(epoch_len_list))  # mean epoch in kflod cross validation
    else:
        # n_splits == 1: train on all data, validate on the test set.
        model_, history_, metric_, test_pred_, epoch_len = training_model(
            train_set_mix, train_set_mix_label, task_name, None, None,
            test_set, test_set_label, epoch, batchsize, iter_, step_, target, seed)
        metric_df = pd.concat([metric_df, metric_], axis=0)
        test_prediction = pd.concat([test_prediction, pd.DataFrame(test_pred_)], axis=1)
        history_df = pd.concat([history_df, history_], axis=1)
        history_list.append(history_)
        plot_history_df(history_list, task_name, val_flag='val_')
    try:
        model_.save('{}_{}nrun_{}Fold.h5'.format(task_name, nrun, n_splits))
    # NOTE(review): bare except silently swallows everything (including
    # KeyboardInterrupt); `except Exception` would be safer here.
    except:
        print('Failed to save model')
    return metric_df, test_prediction, history_df


# ---- experiment configuration (module level) ----
np.random.seed(2020)
datasets_name = 'LiverAblation'
task_name = 'ablation_time_load'  # ablation_time_enh / ablation_time_vanilla / relapse_risk
nrun = 10  # num of repeated experiments
clean_ratio = 1  # 1 for No label confusion
test_ratio = 0  # test data ratio for label confusion
val_ratio = 0  # val data ratio for label confusion
n_splits = 1  # n_splits > 1 for Kfold cross validation / n_splits==1 for training all data
epoch = 5000  # Kfold cross validation: a large number / training all data: mean epoch
batchsize = 256
iter_ = 2  # Number of iterations for label modification
step_ = 0.0001  # learning rate for label modification


def main():
    """Repeat the experiment `nrun` times, aggregate and export metrics."""
    metric_df_all = pd.DataFrame([])
    test_prediction_all = pd.DataFrame([])  # for prediction of test data
    history_df_all = pd.DataFrame([])  # for keras model
    for i, trial in enumerate(tqdm(six.moves.xrange(nrun))):
        print('rnum : {}'.format(i))
        seed = (trial * 2718) % 2020  # a different random seed for each run
        train_data, test_data = load_data_(datasets_name, task_name, seed)
        metric_df, test_prediction, history_df = run(train_data, test_data, seed, task_name)
        metric_df_all = pd.concat([metric_df_all, metric_df], axis=0)
        test_prediction_all = pd.concat([test_prediction_all, test_prediction], axis=1)
        history_df_all = pd.concat([history_df_all, history_df], axis=1)
    # Summarise every metric column across the repeated runs.
    for col in metric_df_all.columns:
        print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(
            col,
            metric_df_all[col].mean(),
            metric_df_all[col].std(),
            metric_df_all[col].max(),
            metric_df_all[col].median(),
            metric_df_all[col].min()))
    metric_df_all.to_csv('./metric_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    history_df_all.to_csv('./history_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    # test_prediction_all.columns = ['ab_time', 'ab_time_enh']
    test_prediction_all.to_csv('./prediction{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits))
    plt.show()
    pass


if __name__ == '__main__':
    main()
    pass
/Regression/src/model/bulid_model.py
import tensorflow as tf
import numpy as np
import pandas as pd
from keras import backend as K
from keras import regularizers, activations
from keras.layers import Dense, Input, Add, Concatenate, Dropout, \
    BatchNormalization, Activation, Multiply, Embedding, Layer, GlobalAveragePooling1D
from keras.models import Model
import copy


class Self_Attention(Layer):
    """Single-head scaled dot-product self-attention as a Keras layer.

    Expects 3-D input (batch, seq, features); projects Q/K/V from one
    (3, features, output_dim) kernel and returns (batch, seq, output_dim).
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(Self_Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # One stacked kernel holding the Q, K and V projection matrices.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(3, input_shape[2], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(Self_Attention, self).build(input_shape)

    def call(self, x):
        WQ = K.dot(x, self.kernel[0])
        WK = K.dot(x, self.kernel[1])
        WV = K.dot(x, self.kernel[2])
        print("WQ.shape", WQ.shape)
        print("K.permute_dimensions(WK, [0, 2, 1]).shape", K.permute_dimensions(WK, [0, 2, 1]).shape)
        QK = K.batch_dot(WQ, K.permute_dimensions(WK, [0, 2, 1]))
        # NOTE(review): scaling uses the sequence length (axis 1), whereas the
        # standard transformer scales by sqrt(d_k) — confirm intent.
        QK = QK / (x.shape.as_list()[1] ** 0.5)
        QK = K.softmax(QK)
        print("QK.shape", QK.shape)
        V = K.batch_dot(QK, WV)
        return V

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.output_dim)


class FM(Layer):
    """Factorization Machine layer: linear term + second-order interactions."""

    def __init__(self, output_dim, latent=32, activation='relu', **kwargs):
        self.latent = latent          # dimensionality of the factor vectors
        self.output_dim = output_dim
        self.activation = activations.get(activation)
        super(FM, self).__init__(**kwargs)

    def build(self, input_shape):
        self.b = self.add_weight(name='W0', shape=(self.output_dim,),
                                 trainable=True, initializer='zeros')
        self.w = self.add_weight(name='W', shape=(input_shape[1], self.output_dim),
                                 trainable=True, initializer='random_uniform')
        self.v = self.add_weight(name='V', shape=(input_shape[1], self.latent),
                                 trainable=True, initializer='random_uniform')
        super(FM, self).build(input_shape)

    def call(self, inputs, **kwargs):
        x = inputs
        x_square = K.square(x)
        xv = K.square(K.dot(x, self.v))
        xw = K.dot(x, self.w)
        # Second-order FM term: 0.5 * sum((xV)^2 - x^2 V^2).
        p = 0.5*K.sum(xv-K.dot(x_square, K.square(self.v)), 1)
        rp = K.repeat_elements(K.reshape(p, (-1, 1)), self.output_dim, axis=-1)
        f = xw + rp + self.b
        output = K.reshape(f, (-1, self.output_dim))
        return output

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape)==2
        return input_shape[0],self.output_dim


def Att(att_dim, inputs, name):
    """Feature-attention gate: softmax weights multiplied element-wise onto inputs."""
    V = inputs
    QK = Dense(att_dim//4, bias=None, activation='relu')(inputs)
    QK = Dense(att_dim, bias=None, activation='relu')(QK)
    QK = Activation("softmax", name=name)(QK)
    MV = Multiply()([V, QK])
    return(MV)


def regression_(train_x):
    """Build the attention+MLP regression model (one linear output unit)."""
    input_dim = train_x.shape[1]
    l1_regul = 0
    l2_regul = 0
    input = Input(shape=(input_dim,))
    # input_ = BatchNormalization()(input, training=False)
    # input_fm = FM(input_dim)(input_)
    # input_emb = Embedding(input_dim + 1, input_dim//2)(input)
    # att = Self_Attention(input_dim//2)(input_emb)
    # att = GlobalAveragePooling1D()(att)
    atts1 = Att(input_dim, input, "attention_vec10")
    # atts11 = Att(input_dim, input_, "attention_vec11")
    # mlp_layer = Add()([atts1, atts11])
    # mlp_layer = Att(input_dim, mlp_layer, "attention_vec20")
    mlp_layer = atts1
    for units_ in [64, 16]:
        mlp_layer = Dense(units_, activation='relu',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(mlp_layer)
        # mlp_layer = Dropout(0.5)(mlp_layer)
        # mlp_layer = BatchNormalization()(mlp_layer, training=False)
    # atts2 = Att(32, mlp_layer, "attention_vec2")
    mlp_layer_output = Dense(1)(mlp_layer)
    regression = Model(input=input, output=mlp_layer_output)
    return regression


def classifer_(train_x):
    """Build the sup-label classifier (features + label as a second input).

    :return: (classifer, att_weight) — the sigmoid classifier and a model
        exposing the first attention map for inspection.
    """
    input_dim = train_x.shape[1]
    input_dim_emb = (input_dim + 1)
    input_ = Input(shape=(input_dim,))
    input_c = Input(shape=(1,))  # the (possibly noisy) regression label
    l1_regul = 0
    l2_regul = 0
    # encoder layers
    inputs = Concatenate()([input_, input_c])
    atts1 = Att(input_dim_emb, inputs, "attention_vec10")
    # atts2 = Att(input_dim + 1, inputs, "attention_vec11")
    # input_fm = FM(input_dim + 1)(atts1)
    encoded_layer = atts1
    # encoded_layer = Concatenate()([atts1, atts2])
    for units_ in [64]:
        encoded_layer = Dense(units_, activation='relu',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l1_regul))(encoded_layer)
        encoded_layer = Dropout(0.5)(encoded_layer)
        encoded_layer = BatchNormalization()(encoded_layer, training=False)
    encoder_output = Concatenate()([encoded_layer, input_c])
    # decoder layers (built but not attached to any returned model)
    decoded_layer = encoded_layer
    for units_ in [16, 128, train_x.shape[1]]:
        decoded_layer = Dense(units_, activation='relu',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l1_regul))(decoded_layer)
        # decoded_layer = Dropout(0.2)(decoded_layer)
        decoded_layer = BatchNormalization()(decoded_layer, training=False)
    # classifer layers
    classifer_layer = Dense(8, activation='relu',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(encoded_layer)
    classifer_layer = Dense(1, activation='sigmoid',
                            kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(classifer_layer)
    # encoder = Model(input=[input_, input_c], output=encoded_layer)
    classifer = Model(input=[input_, input_c], output=classifer_layer)
    # autoencoder = Model(input=[input_, input_c], output=decoded_layer)
    att_weight = Model(input=[input_, input_c], output=atts1)
    # classifer.add_loss(recon_loss(y_true=input_, y_pred=decoded_layer))
    return classifer, att_weight


def eval_loss_and_grads(x, fetch_loss_and_grads):
    """Run the compiled K.function and split its outputs into (loss, grads)."""
    outs = fetch_loss_and_grads(x)
    loss_value = outs[0]
    grad_values = outs[1]
    return loss_value, grad_values


def gradient_ascent(x, fetch_loss_and_grads, iter, step, max_loss=None, min_loss=None):
    """get gradient

    :param x: [dataframe list] inputs and label
    :param fetch_loss_and_grads: [ ] K.function
    :param iter: [int] Number of iterations for label modification
    :param step: [float] Learning rate for label modification
    :param max_loss: unused (the early-stop check is commented out)
    :param min_loss: unused
    :return: [list] x with x[1] (the label) updated in place
    """
    for i in range(iter):
        loss_value, grad_values = eval_loss_and_grads(x, fetch_loss_and_grads)
        # if max_loss is not None and loss_value > max_loss:
        #     break
        # Gradient step on the label itself (not the model weights).
        x[1] = x[1] - step * np.squeeze(grad_values).reshape(-1, 1)
    return x


def label_correction(model, model_input, label, iter_=1, step_=1e-3):
    """correct label

    :param model: [keras model] Relapse risk prediction model
    :param model_input: [dataframe] Inputs
    :param label: [series] Labels that need to be corrected
    :param iter_: [int] Number of iterations for label modification
    :param step_: [float] Learning rate for label modification
    :return label_target: [dataframe] Corrected label
    """
    loss = K.variable(0.)
    coeff = 1
    # Objective: mean squared activation of the model's last layer.
    activation = model.get_layer(index=-1).output
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss = loss + coeff * K.sum(K.square(activation[:, :])) / scaling
    dream = model.input
    # Differentiate the objective w.r.t. the label input (model input #2).
    grads = K.gradients(loss, dream[1])
    grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)  # L1-normalise the gradient
    outputs = [loss, grads]
    fetch_loss_and_grads = K.function([dream[0], dream[1]], outputs, K.set_learning_phase(0))
    label_target = pd.DataFrame(copy.deepcopy(label))
    label_target = gradient_ascent([model_input, label_target], fetch_loss_and_grads,
                                   iter=iter_, step=step_)[1]
    return label_target


def get_model(inputs, labels, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=None, reuse=None):
    """Build the small MLP used by the reweighting pipeline.

    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, logits)
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Fetch a variable from w_dict if present (allows running a second
        # "virtual" forward pass with perturbed weights), else create it.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var

    with tf.variable_scope('Model', reuse=reuse):
        shape_list = np.append(np.array([-1]), np.squeeze(inputs.shape[1:].as_list()))
        # shape_list_fir = np.append(np.squeeze(inputs.shape[1:].as_list()), np.array([16]))
        # shape_list_sec = np.array([16, 8])
        # shape_list_thr = np.array([8, 1])
        inputs_ = tf.cast(tf.reshape(inputs, shape_list), dtype)
        labels = tf.cast(tf.reshape(labels, [-1, 1]), dtype)
        # w_init = tf.truncated_normal_initializer(stddev=0.1)
        # w1 = _get_var('w1', shape_list_fir, dtype, initializer=w_init)
        # w2 = _get_var('w2', shape_list_sec, dtype, initializer=w_init)
        # w3 = _get_var('w3', shape_list_thr, dtype, initializer=w_init)
        # w4 = _get_var('w4', [1, 1], dtype, initializer=w_init)
        #
        # b_init = tf.constant_initializer(0.0)
        # b1 = _get_var('b1', 1, dtype, initializer=b_init)
        # b2 = _get_var('b2', 1, dtype, initializer=b_init)
        # b3 = _get_var('b3', 1, dtype, initializer=b_init)
        # b4 = _get_var('b4', 1, dtype, initializer=b_init)
        #
        # act = tf.nn.relu
        #
        # l0 = tf.identity(inputs_, name='l0')
        # z1 = tf.add(tf.matmul(l0, w1), b1, name='z1')
        # l1 = act(z1, name='l1')
        # z2 = tf.add(tf.matmul(l1, w2), b2, name='z2')
        # l2 = act(z2, name='l2')
        # z3 = tf.add(tf.matmul(l2, w3), b3, name='z3')
        # l3 = act(z3, name='l3')
        # z4 = tf.add(tf.matmul(l3, w4), b4, name='z4')
        # logits = tf.squeeze(l3)
        # out = tf.sigmoid(logits)
        dense1 = tf.layers.dense(inputs=inputs_, units=64, activation=tf.nn.relu)
        dense2 = tf.layers.dense(inputs=dense1, units=16, activation=tf.nn.relu)
        # NOTE(review): the output already has a sigmoid activation, yet it is
        # fed to sigmoid_cross_entropy_with_logits below, which applies sigmoid
        # again — this double-sigmoid is almost certainly unintended; confirm.
        logits = tf.layers.dense(inputs=dense2, units=1, activation=tf.nn.sigmoid)

        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
        else:
            # Weighted loss.
            loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels) * ex_wts)
    return w_dict, loss, logits


def reweight_random(bsize, eps=0.0):
    """Reweight examples using random numbers.

    :param bsize: [int] Batch size.
    :param eps: [float] Minimum example weights, default 0.0.
    """
    ex_weight = tf.random_normal([bsize], mean=0.0, stddev=1.0)
    ex_weight_plus = tf.maximum(ex_weight, eps)
    ex_weight_sum = tf.reduce_sum(ex_weight_plus)
    # Guard against a zero sum (all weights clipped to 0).
    ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0))
    ex_weight_norm = ex_weight_plus / ex_weight_sum
    return ex_weight_norm


def reweight_autodiff(inp_a, label_a, inp_b, label_b, bsize_a, bsize_b, eps=0.0, gate_gradients=1):
    """Reweight examples using automatic differentiation.

    :param inp_a: [Tensor] Inputs for the noisy pass.
    :param label_a: [Tensor] Labels for the noisy pass.
    :param inp_b: [Tensor] Inputs for the clean pass.
    :param label_b: [Tensor] Labels for the clean pass.
    :param bsize_a: [int] Batch size for the noisy pass.
    :param bsize_b: [int] Batch size for the clean pass.
    :param eps: [float] Minimum example weights, default 0.0.
    :param gate_gradients: [int] Tensorflow gate gradients, reduce concurrency.
    """
    ex_wts_a = tf.zeros([bsize_a], dtype=tf.float32)
    ex_wts_b = tf.ones([bsize_b], dtype=tf.float32) / float(bsize_b)
    # Noisy pass with zero example weights (so grads w.r.t. weights exist).
    w_dict, loss_a, logits_a = get_model(
        inp_a, label_a, ex_wts=ex_wts_a, is_training=True, reuse=True)
    var_names = w_dict.keys()
    var_list = [w_dict[kk] for kk in var_names]
    grads = tf.gradients(loss_a, var_list, gate_gradients=gate_gradients)
    # One virtual SGD step on the model weights.
    var_list_new = [vv - gg for gg, vv in zip(grads, var_list)]
    w_dict_new = dict(zip(var_names, var_list_new))
    # Clean pass with the virtually-updated weights.
    _, loss_b, logits_b = get_model(
        inp_b, label_b, ex_wts=ex_wts_b, is_training=True, reuse=True, w_dict=w_dict_new)
    grads_ex_wts = tf.gradients(loss_b, [ex_wts_a], gate_gradients=gate_gradients)[0]
    # Negative gradient => examples whose up-weighting reduces the clean loss.
    ex_weight = -grads_ex_wts
    ex_weight_plus = tf.maximum(ex_weight, eps)
    ex_weight_sum = tf.reduce_sum(ex_weight_plus)
    ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0))
    ex_weight_norm = ex_weight_plus / ex_weight_sum
    return ex_weight_norm


def reweight_hard_mining(inp, label, positive=False):
    """Reweight examples using hard mining.

    :param inp: [Tensor] [N, ...] Inputs.
    :param label: [Tensor] [N] Labels
    :param positive: [bool] Whether perform hard positive mining or hard negative mining.
    :return: [Tensor] Examples weights of the same shape as the first dim of inp.
    """
    _, loss, logits = get_model(inp, label, ex_wts=None, is_training=True, reuse=True)
    # Mine for positive
    if positive:
        loss_mask = loss * label
    else:
        loss_mask = loss * (1 - label)
    # k = number of examples of the opposite class (balance the selection).
    if positive:
        k = tf.cast(tf.reduce_sum(1 - label), tf.int32)
    else:
        k = tf.cast(tf.reduce_sum(label), tf.int32)
    k = tf.maximum(k, 1)
    loss_sorted, loss_sort_idx = tf.nn.top_k(loss_mask, k)
    if positive:
        mask = 1 - label
    else:
        mask = label
    updates = tf.ones([tf.shape(loss_sort_idx)[0]], dtype=label.dtype)
    mask_add = tf.scatter_nd(tf.expand_dims(loss_sort_idx, axis=1), updates, [tf.shape(inp)[0]])
    mask = tf.maximum(mask, mask_add)
    mask_sum = tf.reduce_sum(mask)
    mask_sum += tf.cast(tf.equal(mask_sum, 0.0), tf.float32)
    mask = mask / mask_sum
    return mask


def get_lenet_model(inputs, labels, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=None, reuse=None):
    """Builds a simple LeNet.

    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, logits)
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Same reuse-through-dict mechanism as in get_model above.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var

    with tf.variable_scope('Model', reuse=reuse):
        inputs_ = tf.cast(tf.reshape(inputs, [-1, 28, 28, 1]), dtype)
        labels = tf.cast(labels, dtype)
        w_init = tf.truncated_normal_initializer(stddev=0.1)
        w1 = _get_var('w1', [5, 5, 1, 16], dtype, initializer=w_init)  # [14, 14, 16]
        w2 = _get_var('w2', [5, 5, 16, 32], dtype, initializer=w_init)  # [7, 7, 32]
        w3 = _get_var('w3', [5, 5, 32, 64], dtype, initializer=w_init)  # [4, 4, 64]
        w4 = _get_var('w4', [1024, 100], dtype, initializer=w_init)
        w5 = _get_var('w5', [100, 1], dtype, initializer=w_init)
        b_init = tf.constant_initializer(0.0)
        b1 = _get_var('b1', [16], dtype, initializer=b_init)
        b2 = _get_var('b2', [32], dtype, initializer=b_init)
        b3 = _get_var('b3', [64], dtype, initializer=b_init)
        b4 = _get_var('b4', [100], dtype, initializer=b_init)
        b5 = _get_var('b5', [1], dtype, initializer=b_init)
        act = tf.nn.relu
        # Conv-1
        l0 = tf.identity(inputs_, name='l0')
        z1 = tf.add(tf.nn.conv2d(inputs_, w1, [1, 1, 1, 1], 'SAME'), b1, name='z1')
        l1 = act(tf.nn.max_pool(z1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l1')
        # Conv-2
        z2 = tf.add(tf.nn.conv2d(l1, w2, [1, 1, 1, 1], 'SAME'), b2, name='z2')
        l2 = act(tf.nn.max_pool(z2, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l2')
        # Conv-3
        z3 = tf.add(tf.nn.conv2d(l2, w3, [1, 1, 1, 1], 'SAME'), b3, name='z3')
        l3 = act(tf.nn.max_pool(z3, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l3')
        # FC-4
        z4 = tf.add(tf.matmul(tf.reshape(l3, [-1, 1024]), w4), b4, name='z4')
        l4 = act(z4, name='l4')
        # FC-5
        z5 = tf.add(tf.matmul(l4, w5), b5, name='z5')
        logits = tf.squeeze(z5)
        out = tf.sigmoid(logits)

        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
        else:
            # Weighted loss.
            loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels) * ex_wts)
    return w_dict, loss, logits
/Regression/src/model/evaluate.py
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
    confusion_matrix, precision_score, recall_score, f1_score, r2_score, accuracy_score
from sklearn.preprocessing import MinMaxScaler


def evaluate_classification(model, train_sets, train_label, val_sets, val_label, test_sets, test_label):
    """Score a binary classifier on train/val/test splits.

    :param model: fitted model exposing `.predict` (probabilities in [0, 1]).
    :param train_sets/train_label: training inputs and 0/1 labels.
    :param val_sets/val_label: validation inputs/labels; val metrics are None
        when val_label is None.
    :param test_sets/test_label: test inputs and 0/1 labels.
    :return: one-row DataFrame with accuracy and F1 per split.
    """
    relapse_risk_test = model.predict(test_sets)
    relapse_risk_tra = model.predict(train_sets)
    # Predictions are probabilities; round to 0/1 for the threshold metrics.
    train_acc = accuracy_score(train_label, relapse_risk_tra.round())
    test_acc = accuracy_score(test_label, relapse_risk_test.round())
    train_f1 = f1_score(train_label, relapse_risk_tra.round())
    test_f1 = f1_score(test_label, relapse_risk_test.round())
    val_acc = None
    val_f1 = None
    if val_label is not None:
        relapse_risk_val = model.predict(val_sets)
        val_acc = accuracy_score(val_label, relapse_risk_val.round())
        val_f1 = f1_score(val_label, relapse_risk_val.round())
    dict_ = dict(zip(['train_acc', 'test_acc', 'val_acc', 'val_f1', 'train_f1', 'test_f1'],
                     [train_acc, test_acc, val_acc, val_f1, train_f1, test_f1]))
    return pd.DataFrame([dict_])


def mape(y_true, y_pred):
    """Mean absolute percentage error, in percent. Undefined where y_true == 0."""
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100


def wmape(y_true, y_pred):
    """Weighted MAPE, in percent: total absolute error over total |y_true|."""
    return np.mean(np.abs(y_true - y_pred)) / np.mean(np.abs(y_true)) * 100


def smape(y_true, y_pred):
    """Symmetric MAPE, in percent (denominator |y_pred| + |y_true|)."""
    return 2.0 * np.mean(np.abs(y_pred - y_true) / (np.abs(y_pred) + np.abs(y_true))) * 100


def evaluate_regression(model, train_sets, train_label, val_x, val_label, test_sets, test_label):
    """Score a regressor on train/val/test: R², adjusted R², MSE, MAE, wMAPE.

    BUG FIX: the test adjusted-R² previously used the *train* set's row and
    column counts (num_data_test/num_feat_test were read from
    train_sets.shape); it now uses test_sets.shape as intended.

    :param model: fitted model exposing `.predict`.
    :param val_x: validation inputs as a DataFrame; val metrics are None when
        it is empty.
    :return: one-row DataFrame of all split metrics.
    """
    test_target_pred = model.predict(test_sets)
    train_target_pred = model.predict(train_sets)
    num_data_tra = train_sets.shape[0]
    num_feat_tra = train_sets.shape[1]
    num_data_test = test_sets.shape[0]   # fixed: was train_sets.shape[0]
    num_feat_test = test_sets.shape[1]   # fixed: was train_sets.shape[1]
    train_r2 = r2_score(train_label, train_target_pred)
    train_r2_ad = 1 - ((1 - train_r2) * (num_data_tra - 1)) / abs(num_data_tra - num_feat_tra - 1)
    test_r2 = r2_score(test_label, test_target_pred)
    test_r2_ad = 1 - ((1 - test_r2) * (num_data_test - 1)) / abs(num_data_test - num_feat_test - 1)
    train_mse = mean_squared_error(train_label, train_target_pred)
    train_mae = mean_absolute_error(train_label, train_target_pred)
    test_mse = mean_squared_error(test_label, test_target_pred)
    test_mae = mean_absolute_error(test_label, test_target_pred)
    # Rescale labels/predictions away from zero before the percentage metrics
    # (labels were standardised upstream and may contain zeros/negatives).
    mms = MinMaxScaler(feature_range=(0.1, 1))
    train_label_mms = mms.fit_transform(np.array(train_label).reshape(-1, 1))
    test_label_mms = mms.fit_transform(np.array(test_label).reshape(-1, 1))
    train_target_pred_mns = mms.fit_transform(train_target_pred.reshape(-1, 1))
    test_target_pred_mns = mms.fit_transform(test_target_pred.reshape(-1, 1))
    train_mape = wmape(train_label_mms, train_target_pred_mns.reshape(-1, ))
    test_mape = wmape(test_label_mms, test_target_pred_mns.reshape(-1, ))
    if not val_x.empty:
        val_target_pred = model.predict(val_x)
        num_data_val = val_x.shape[0]
        num_feat_val = val_x.shape[1]
        val_r2 = r2_score(val_label, val_target_pred)
        val_r2_ad = 1 - ((1 - val_r2) * (num_data_val - 1)) / abs(num_data_val - num_feat_val - 1)
        val_mse = mean_squared_error(val_label, val_target_pred)
        val_mae = mean_absolute_error(val_label, val_target_pred)
        val_label_mms = mms.fit_transform(np.array(val_label).reshape(-1, 1))
        val_target_pred_mns = mms.fit_transform(val_target_pred.reshape(-1, 1))
        # NOTE(review): val uses smape while train/test use wmape — confirm
        # this asymmetry is intentional.
        val_mape = smape(val_label_mms, val_target_pred_mns.reshape(-1, ))
    else:
        val_r2, val_r2_ad, val_mse, val_mae, val_mape = None, None, None, None, None
    dict_ = dict(zip(['train_r2', 'train_r2_ad', 'train_mse', 'train_mae', 'train_mape',
                      'val_r2', 'val_r2_ad', 'val_mse', 'val_mae', 'val_mape',
                      'test_r2', 'test_r2_ad', 'test_mse', 'test_mae', 'test_mape'],
                     [train_r2, train_r2_ad, train_mse, train_mae, train_mape,
                      val_r2, val_r2_ad, val_mse, val_mae, val_mape,
                      test_r2, test_r2_ad, test_mse, test_mae, test_mape, ]))
    return pd.DataFrame.from_dict([dict_])
/Regression/src/model/history_.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math

plt.rc('font', family='Times New Roman')
font_size = 16


def plot_metric_df(history_list, task_name, val_flag='test_'):
    """Plot the per-round metric curves for several methods side by side."""
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['r2', 'mae', 'mse']
    fig = plt.figure(figsize=(20, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    col = L / row
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i+1)
        show_metric(history_list, metric, val_flag)
    fig.subplots_adjust(top=0.8)
    legend_labels = ['ours',  # 'enh_nonrelapse',
                     'ATT+MLP',  # 'vanilla_nonrelapse',
                     'LGB',  # 'lightgbm_nonrelapse',
                     'Lasso',  # 'lasso_nonrelapse'
                     ]
    plt.legend(labels=legend_labels,
               ncol=len(legend_labels),
               # loc='best',
               loc='upper center',
               fontsize=14,
               bbox_to_anchor=(-1.2, 1, 1, 0.2),
               borderaxespad=0.,
               )
    # plt.title('{} {}'.format(task_name, metric), fontsize=font_size)


def show_metric(history_list, metrics_name, val_flag=''):
    """Plot one metric column (filtered by name) from each history DataFrame."""
    marker_list = ['*', 'd', 's', 'x', 'o']
    metrics_name_dict = {'r2': 'R-square', 'mae': 'mean absolute error', 'mse': 'mean squared error'}
    for m, history in enumerate(history_list):
        history_metric = history.filter(regex=r'\b{}{}\b'.format(val_flag, metrics_name))[:3000]
        plt.plot(history_metric, linestyle=':', marker=marker_list[m], linewidth=2)
    plt.xticks(range(0, 11), fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(metrics_name_dict[metrics_name], fontsize=font_size)
    plt.xlabel('Round', fontsize=font_size)


def plot_history_df(history_list, task_name, val_flag=''):
    """Plot mean training curves (loss + task metric) across folds."""
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    col = L / row
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i+1)
        show_history(history_list, metric, val_flag)
    plt.legend(labels=['attention', 'attention+mlp', 'attention+label corrected',
                       'attention+mlp+label corrected(ours)', 'mlp', 'mlp+label corrected'],
               fontsize=14)
    # plt.title('{} {}'.format(metric, task_name), fontsize=font_size)


def show_history(history_list, metrics_name, val_flag=''):
    """Plot the mean of a metric across folds with a min/max band."""
    marker_list = ['^', 'd', 's', '*', 'x', 'o']
    for m, history in enumerate(history_list):
        history_metric = history.filter(regex=r'\b{}{}'.format(val_flag, metrics_name))[:3000]
        history_ = np.mean(history_metric, axis=1)
        len_ = history_.shape[0]
        plt.plot(history_, linewidth=2, marker=marker_list[m], markevery=200)
        plt.fill_between(range(len_), np.min(history_metric, axis=1),
                         np.max(history_metric, axis=1), alpha=0.3)
    plt.xticks(fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(val_flag + metrics_name, fontsize=font_size)
    plt.xlabel('Epoch', fontsize=font_size)


def plot_history(history_list, task_name, val_flag=False):
    """Plot averaged keras History curves, optionally with val_ overlays.

    BUG FIX: the subplot row count previously came from an undefined
    `squrt()` call, which raised NameError on every invocation; it now uses
    the same floor(sqrt(L)) computation as the sibling plot functions.
    """
    if task_name == 'relapse_risk':
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))  # fixed: was undefined squrt()
    for i, metric in enumerate(metric_list):
        plt.subplot(row, L, i+1)
        show_train_history(history_list, metric)
        if val_flag:
            show_train_history(history_list, 'val_{}'.format(metric))
        plt.legend(labels=[metric, 'val_{}'.format(metric)], loc='upper left')
        plt.title('{} {}'.format(task_name, metric))


def history_save(history_list, history_name):
    """Concatenate keras History dicts row-wise and dump them to CSV."""
    history_all = pd.DataFrame([])
    for history in history_list:
        history_ = pd.DataFrame.from_dict(history.history, orient='index')
        history_all = pd.concat([history_all, history_], axis=0)
    # NOTE(review): 'hitory' is a typo in the output filename; kept as-is so
    # existing downstream readers of these files keep working.
    history_all.to_csv('./hitory_{}.csv'.format(history_name))


def show_train_history(history_list, metrics_name):
    """Plot the mean of one metric over a list of keras History objects."""
    metrics_list = None
    for history in history_list:
        history_metric = pd.DataFrame(np.array(history.history[metrics_name]).reshape(1, -1))
        if metrics_list is None:
            metrics_list = history_metric
        else:
            metrics_list = pd.concat([metrics_list, history_metric], axis=0)
    # metrics = np.median(metrics_list, axis=0)
    metrics = np.mean(metrics_list, axis=0)
    plt.plot(metrics)
    plt.ylabel(metrics_name)
    plt.xlabel('Epoch')
/Regression/src/model/training_.py
import copy
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from keras.models import load_model
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import EarlyStopping
from model.bulid_model import classifer_, regression_, label_correction
from model.evaluate import evaluate_classification, evaluate_regression


def precision(y_true, y_pred):
    """Batch precision as a Keras backend metric (predictions rounded to 0/1)."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision


def recall(y_true, y_pred):
    """Batch recall as a Keras backend metric (predictions rounded to 0/1)."""
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall


def r2(y_true, y_pred):
    """Batch R-squared as a Keras backend metric."""
    return 1 - K.sum(K.square(y_pred - y_true))/K.sum(K.square(y_true - K.mean(y_true)))


def f1(y_true, y_pred):
    """Batch F1 (harmonic mean of the precision/recall metrics above)."""
    return 2 * precision(y_true, y_pred) * \
           recall(y_true, y_pred) / (precision(y_true, y_pred) + recall(y_true, y_pred) + 1e-7)


# model compile and fit
def model_training(model, train_sets, train_label, val_data, val_label, lr, task, epoch, batch_size, patience=100):
    """Compile and fit a Keras model for either task.

    :param model: [keras model] Uncompiled model.
    :param task: [str] 'classification' or 'regression' — selects loss,
        metrics and the early-stopping monitor.
    :param val_label: when None the monitored metric is still 'val_*' even
        though no validation data is passed — NOTE(review): EarlyStopping will
        then warn that the monitored quantity is unavailable; confirm intent.
    :return: (history, model)
    """
    if task == 'classification':
        metrics = ['acc', f1, precision, recall]
        loss = 'binary_crossentropy'
        val_metric = 'val_f1'
    elif task == 'regression':
        metrics = ['mse', 'mae', r2]
        metrics = [r2]  # overrides the line above; only r2 is tracked
        loss = 'mean_squared_error'
        val_metric = 'val_r2'
    model.compile(optimizer=RMSprop(lr=lr), loss=loss, metrics=metrics)
    model.summary()
    if val_label is None:
        history = model.fit(train_sets, train_label,
                            epochs=epoch,
                            batch_size=batch_size,
                            shuffle=True,
                            callbacks=[EarlyStopping(monitor=val_metric, patience=patience, mode='max')],
                            # callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)],
                            verbose=2,
                            )
    else:
        history = model.fit(train_sets, train_label,
                            # validation_split=0.3,
                            validation_data=(val_data, val_label),
                            epochs=epoch,
                            batch_size=batch_size,
                            shuffle=True,
                            callbacks=[EarlyStopping(monitor=val_metric, patience=patience, mode='max')],
                            # callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)],
                            verbose=2,
                            )
    return history, model


# select model
def training_model(train_set, train_set_label, task_name, train_index, val_index, test_set, test_set_label,
                   epoch, batchsize, iter_=None, step_=None, target='label', seed=2020, label_corr_epoch=2):
    """Train/evaluate the model selected by substring-matching task_name.

    Branches: 'risk' (classifier), 'vanilla' (plain regression), 'load'
    (reload saved .h5), 'enh' (label-correction loop), 'lr' (LassoCV),
    'gbm' (LightGBM).

    :param train_index/val_index: K-fold indices; both None means train on
        everything and validate on the test set.
    :return: (model, history_df, metric, test_pred, len_) where len_ is the
        number of epochs actually run.
    """
    if train_index is not None:
        train_x, val_x = train_set.iloc[train_index], train_set.iloc[val_index]
        train_y, val_y = train_set_label.iloc[train_index], train_set_label.iloc[val_index]
        val_label = val_y[target]
        val_suplabel = val_y['sup_label']
        # Drop post-operative / discharge columns (not available at
        # prediction time for the ablation-time task).
        val_x_time = val_x.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
    else:
        train_x = train_set
        train_y = train_set_label
        val_x = test_set
        val_x_time = test_set.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
        val_label = test_set_label[target]
        val_suplabel = test_set_label['sup_label']
    train_x_time = train_x.drop(columns=train_x.filter(regex=r'术后|出院|Post').columns)
    test_set_time = test_set.drop(columns=test_set.filter(regex=r'术后|出院|Post').columns)
    # train_x_time.to_csv('train_data.csv', encoding='gb18030')
    # NOTE(review): debugging leftovers — reads a hard-coded CSV and computes
    # column diffs (xx/rr) that are never used; crashes if the file is absent.
    train_data_raw = pd.read_csv('train_data.csv', encoding='gb18030')
    xx = set(train_data_raw.columns) - set(train_x_time.columns)
    rr = set(train_x_time.columns) - set(train_data_raw.columns)
    if 'risk' in task_name:
        # Relapse-risk classification with the attention classifier.
        classifer, att_weight = classifer_(train_x)
        # epoch=130 for training whole data 107
        # lr=8e-5 batchsize=8 patience= 90
        history, model = model_training(classifer, [train_x, train_y[target]], train_y['sup_label'],
                                        [val_x, val_label], val_suplabel,
                                        8e-5, 'classification', 120, 16, 190)
        metric = evaluate_classification(model, [train_x, train_y[target]], train_y['sup_label'],
                                         [val_x, val_label], val_suplabel,
                                         [test_set, test_set_label[target]], test_set_label['sup_label'])
        test_pred = model.predict([test_set, test_set_label[target]])
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = history_df.shape[0]  # count the number of epoch
    elif 'vanilla' in task_name:
        # Plain attention+MLP regression baseline.
        regression = regression_(train_x_time)
        # epoch=2926 for training whole data 2709 for non-relapse data
        # lr=9e-6 batchsize=256 patience= 350
        history, model = model_training(regression, train_x_time, train_y[target], val_x_time, val_label,
                                        9e-6, 'regression', 15000, batchsize, 2500)  # 240 2335
        metric = evaluate_regression(model, train_x_time, train_y[target], val_x_time, val_label,
                                     test_set_time, test_set_label[target], )
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = len(history.history['loss'])  # count the number of epoch
    elif 'load' in task_name:
        # Evaluate a previously-saved 'enh' model without retraining.
        model = load_model('ablation_time_enh_10nrun_1Fold.h5', custom_objects={'r2': r2})
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame([])
        metric = evaluate_regression(model, train_x_time, train_y[target], val_x_time, val_label,
                                     test_set_time, test_set_label[target], )
        len_ = 0
    elif 'enh' in task_name:
        # Enhanced pipeline: train the sup-label classifier, then alternate
        # label correction and regression retraining.
        history_df = pd.DataFrame([])
        classifer, att_weight = classifer_(train_x)
        # lr=8e-5 batchsize=16 epoch= 120
        history, classifer = model_training(classifer, [train_set, train_set_label[target]],
                                            train_set_label['sup_label'],
                                            [pd.DataFrame([]), None], None,
                                            8e-5, 'classification', 120, 16, 130)
        label_target = copy.deepcopy(train_set_label[target])
        regression_enh = regression_(train_x_time)
        len_ = 0
        for i in range(label_corr_epoch):
            print('iter {}'.format(i))
            label_target = label_correction(classifer, train_set, label_target, iter_=iter_, step_=step_)
            # label_target = train_y[target]
            if train_index is not None:
                label_target_train = label_target.iloc[train_index]
                val_label = label_target.iloc[val_index]
            else:
                label_target_train = label_target
            # lr=9e-6 batchsize=256 epoch= 600
            history, model = model_training(regression_enh, train_x_time, label_target_train,
                                            val_x_time, val_label,
                                            7e-5, 'regression', 225, batchsize, 220,)
            # 1e-5, 'regression', 1750, batchsize, 2120, )
            metric = evaluate_regression(model, train_x_time, train_y[target], val_x_time, val_label,
                                         test_set_time, test_set_label[target], )
            test_pred = model.predict(test_set_time)
            if history_df.empty:
                history_df = pd.DataFrame.from_dict(history.history, orient='columns')
            else:
                history_df = pd.concat([history_df,
                                        pd.DataFrame.from_dict(history.history, orient='columns')], axis=0)
            # NOTE(review): this adds the *cumulative* frame length each
            # iteration, so len_ over-counts after the first pass — confirm.
            len_ += history_df.shape[0]  # count the number of epoch
        history_df.reset_index(drop=True, inplace=True)
        if train_index is not None:
            # Diagnostic: how much does the predicted risk drop when the
            # corrected labels are fed back into the classifier?
            val_pred = model.predict(val_x_time)
            risk = classifer.predict([val_x, train_set_label[target].iloc[val_index]])
            risk_corr = classifer.predict([val_x, val_pred])
            risk_change = risk - risk_corr
            risk_change_max = risk_change.max()
            risk_change_mean = risk_change.mean()
            x = 1  # debugger breakpoint anchor
    elif 'lr' in task_name:
        # Linear baseline.
        model = LassoCV(random_state=seed)
        # model = RidgeCV()
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model, train_x_time, train_y[target], val_x_time, val_label,
                                     test_set_time, test_set_label[target], )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    elif 'gbm' in task_name:
        # Gradient-boosting baseline.
        model = lgb.LGBMRegressor(
            max_depth=3,
            bagging_fraction=0.5,
            feature_fraction=0.5,
            reg_alpha=1,
            reg_lambda=1,
            min_child_samples=10,
            n_estimators=200,
            learning_rate=1e-1,
            random_state=seed,
        )
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model, train_x_time, train_y[target], val_x_time, val_label,
                                     test_set_time, test_set_label[target], )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    return model, history_df, metric, test_pred, len_
/Regression/src/preprocess/get_dataset.py
from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler import pandas as pd import numpy as np from preprocess import plot_tabel def get_dataset_(nor, train_data, test_data, clean_ratio, test_retio, seed, target='label', val_ratio=0): if test_retio == 0 or test_data is not None: train_set = train_data test_set = test_data else: train_set, test_set = train_test_split(train_data, test_size=test_retio, random_state=seed) if clean_ratio < 1: train_set_, train_set_clean = train_test_split(train_set, test_size=clean_ratio, random_state=seed) label_distrib = np.random.normal(loc=train_set_[target].describe().loc['mean'], scale=train_set_[target].describe().loc['std'], size=train_set_[target].shape) alpha = 1 beta = 1 train_label_ = train_set_[target] + \ alpha * np.random.normal(loc=0., scale=1., size=train_set_[target].shape) + beta * label_distrib train_set_[target] = train_label_ train_set_['sup_label'] = 1 train_set_clean['sup_label'] = 0 test_set['sup_label'] = 0 else: train_set_ = None train_set_clean = train_set train_set_mix = pd.concat([train_set_, train_set_clean], axis=0) # mix_ratio = train_set[train_set[target] != train_set_mix[target]].index # print('real mix ratio is {}'.format(mix_ratio)) if val_ratio > 0: train_set_mix, val_set = train_test_split(train_set_mix, test_size=val_ratio, random_state=seed) val_set_label = val_set[[target, 'sup_label']] val_set.drop(columns=[target, 'sup_label'], inplace=True) else: val_set = None val_set_label = None train_set_mix_label = train_set_mix[[target, 'sup_label']] test_set_label = test_set[[target, 'sup_label']] # plot_tabel.metric_hist(test_set, nor) train_set_mix.drop(columns=[target, 'sup_label'], inplace=True) test_set.drop(columns=[target, 'sup_label'], inplace=True) return train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label def data_preprocessing(train_data, 
test_data=None, ca_feat_th=8, ca_co_sel_flag=True, onehot_flag=False, target='label'): if test_data is not None: train_data['tab'] = 1 test_data['tab'] = 0 data_raw = pd.concat([train_data, test_data], axis=0) print('\ndata_raw', data_raw.shape) data = data_raw.dropna(axis=1, how='all') xx = data.isnull().sum() data = data.fillna(0) if ca_co_sel_flag: ca_col = [] co_col = [] data_columns_label = data.filter(regex=r'label').columns data_columns = data.columns.drop(data_columns_label) # data_columns = data.columns.drop(['sup_label']) for col in data_columns: data_col = data[col] col_feat_num = len(set(data_col)) if col_feat_num > ca_feat_th: col_ = col + '_dense' co_col.append(col_) data.rename(columns={col: col_}, inplace=True) elif ca_feat_th >= col_feat_num > 1: col_ = col + '_sparse' ca_col.append(col_) data.rename(columns={col: col_}, inplace=True) else: ca_col = data.filter(regex=r'sparse').columns co_col = data.filter(regex=r'dense').columns data[ca_col] = pd.concat([data[ca_col].apply(lambda ser: pd.factorize(ser)[0])]) data[ca_col] = data[ca_col].apply(LabelEncoder().fit_transform) if onehot_flag: data = pd.get_dummies(data, columns=ca_col) co_col = co_col.append(data.columns[data.columns == target]) # 回归目标也需要归一化避免在sup_label分类预测中的模型崩溃 mms = MinMaxScaler(feature_range=(0.1, 1.1)) std = StandardScaler() xx = data.filter(regex=r'label').describe() xx_col = xx.index xx_min = xx.loc['min', :] xx_max = xx.loc['max', :] xx_std = xx.loc['std', :] data[co_col] = pd.DataFrame(std.fit_transform(data[co_col]), columns=co_col, index=data.index) # data[co_col] = pd.DataFrame(mms.fit_transform(data[co_col]), columns=co_col, index=data.index) # data = pd.DataFrame(mms.fit_transform(data), columns=data.columns, index=data.index) if test_data is not None: train_data = data[data['tab'] == 1].drop(columns=['tab']) test_data = data[data['tab'] == 0].drop(columns=['tab']) else: train_data = data ca_col = data.filter(regex=r'sparse').columns co_col = 
data.filter(regex=r'dense').columns return train_data, test_data, co_col, ca_col, std def anomaly_dectection(train_data=None, test_data=None, target='label'): clean_data = [] for data in [train_data, test_data]: if not data.empty: std_ = data[target].std() mean_ = data[target].mean() data = data[data[target] < mean_ + 3 * std_] data = data[data[target] > mean_ - 3 * std_] clean_data.append(data) return clean_data[0], clean_data[1]
/Regression/src/preprocess/load_data.py
#coding=gb18030 import numpy as np import pandas as pd def load_data_(datasets, task_name='', seed=2020): if datasets == 'winequality_white': data_path = '../DataSet/wine/{}.csv'.format(datasets) data = pd.read_csv(data_path) data.rename(columns={'quality': 'label'}, inplace=True) data.dropna(axis=0, subset=['label'], inplace=True) train_data = data.fillna(0) test_data = None elif datasets == 'PPH': data_path = '../DataSet/PPH/{}.csv'.format(datasets) data_head = pd.read_csv('../DataSet/PPH/PPH_head.csv', encoding='gb18030') data = pd.read_csv(data_path, encoding='gb18030', index_col='index') col = [] for col_ in data.columns: col.append(col_ + np.squeeze(data_head[col_].values)) data.columns = np.array(col) # data.to_csv('../DataSet/PPH/data_feat_name_add.csv', index=False, encoding='gb18030') data['sup_label'] = 0 label_col = data.filter(regex=r'n61').columns.values[0] data.rename(columns={label_col: 'label'}, inplace=True) data.dropna(axis=0, subset=['label'], inplace=True) data['hours'] = data.filter(regex=r'field12').values - data.filter(regex=r'field9').values data['hours'] = data['hours'].apply(lambda x: 24 + x if x < 0 else x) data['minutes'] = data.filter(regex=r'field13').values - data.filter(regex=r'field10').values data['minutes'] = data['minutes'].apply(lambda x: 60 + x if x < 0 else x) data['minutes'] += data['hours'] * 60 drop_columns = data.filter( regex=r'n421|field11|其他|field28|其他.1|n262|hours|n61|n51|n4417|n4318|field9|field10|field12|field13').columns train_data = data.drop(columns=drop_columns) # data.fillna(0, inplace=True) test_data = None elif datasets == 'LiverAblation': data_path = '../DataSet/LiverAblation/{}.csv'.format(datasets) data = pd.read_csv(data_path, encoding='gb18030', index_col='基线-患者基本信息-ID_sparse') # data_path = '../DataSet/LiverAblation/{}_trans.csv'.format(datasets) # data = pd.read_csv(data_path, encoding='gb18030', index_col='baseline_info_ID_sparse') data.rename(columns={'time_dense': 'label'}, inplace=True) 
data.rename(columns={'relapse_sparse': 'sup_label'}, inplace=True) drop_columns_ = data.filter(regex=r'随|ID|cluster|followupInfomation').columns data.drop(columns=drop_columns_, inplace=True) data_1 = data.loc[data['sup_label'] == 1] data_0 = data.loc[data['sup_label'] == 0].sample(n=data_1.shape[0] * 1, random_state=seed) data_undersmapling = pd.concat([data_1, data_0]).sample(frac=1, random_state=seed) test_data = data.drop(index=data_undersmapling.index) if 'non' in task_name: train_data = data_0 else: train_data = data_undersmapling else: train_data = None test_data = None return train_data, test_data
/Regression/src/preprocess/plot_tabel.py
import copy import pandas as pd import matplotlib.pyplot as plt from model.history_ import plot_history_df, plot_metric_df import numpy as np from scipy.stats import ttest_ind, levene from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score def mape(y_true, y_pred): return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 def metric_hist(data, nor=None): root_ = '../report/result/' file_list = [ 'ablation_time_enh_1nrun_10Fold.csv',# ours # 'ablation_time_vanilla_att_only__1nrun_10Fold.csv',# att only # 'ablation_time_vanilla_natt_1nrun_10Fold.csv',#mlp only # 'ablation_time_enh_att_only__10nrun_1Fold.csv',# 'ablation_time_enh_natt_1nrun_10Fold.csv',# mlp+lc 'lr_10nrun_1Fold.csv',# baseline_lasso 'lr_non_1nrun_10Fold.csv',# nonrelapse 'gbm_1nrun_10Fold.csv',# gbm 'gbm_non_1nrun_10Fold.csv',# nonrelapse 'ablation_time_vanilla_1nrun_10Fold.csv',# ATT+MLP 'ablation_time_vanilla_non_1nrun_10Fold.csv',# att+mlp+non relapse # 'ablation_time_learn_weight_10nrun_1Fold.csv', # 'ablation_time_enh_non_10nrun_1Fold.csv', # 0.2297 # 'ablation_time_vanilla_att_only_10nrun_1Fold.csv',# # 'ablation_time_enh_natt__10nrun_1Fold.csv',# 0.5686 # 'ablation_time_enh_att_only__10nrun_1Fold.csv',# 0.5690 # 'ablation_time_enh_natt__10nrun_1Fold.csv',# 0.5686 ] metric_file_list = ['metric_' + file for file in file_list] history_file_list = ['history_' + file for file in file_list] pred_file_list = ['prediction' + file for file in file_list] tt_pvalue_list = np.array([]) lv_pvalue_list = np.array([]) metric_file_base = metric_file_list[0] metric_df_base = pd.read_csv(root_ + metric_file_base) for metric_file in metric_file_list: metric_df = pd.read_csv(root_ + metric_file) mae_col = metric_df.filter(regex=r'mae').columns mse_col = metric_df.filter(regex=r'mse').columns # metric_df[mae_col] = metric_df.loc[:, mae_col] * 562.062540 # metric_df[mse_col] = metric_df.loc[:, mse_col] * 562.062540**2 print('\n', metric_file) for col in metric_df.columns: print('{} {:.4f} 
({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df[col].mean(), metric_df[col].std(), metric_df[col].max(), metric_df[col].median(), metric_df[col].min())) v1 = metric_df_base['test_mae'] v2 = metric_df['test_mae'] std_ = levene(v1, v2).pvalue lv_pvalue_list = np.append(lv_pvalue_list, std_) equal_var_ = False if std_ > 0.05: equal_var_ = True res = ttest_ind(v1, v2, equal_var=equal_var_).pvalue tt_pvalue_list = np.append(tt_pvalue_list, res) tt_pvalue_list = tt_pvalue_list.reshape(-1, 1) for pred_file in pred_file_list: pred_df = pd.read_csv(root_ + pred_file, index_col=0) data_inver_label_df = pd.DataFrame([]) metric_df = pd.DataFrame([]) for pred in pred_df: data_co = data.filter(regex=r'dense|^label') data_ = copy.deepcopy(data_co) data_.loc[:, 'label'] = np.array(pred_df[pred]) data_inver_pred = pd.DataFrame(nor.inverse_transform(data_), columns=data_.columns) data_inver = pd.DataFrame(nor.inverse_transform(data_co), columns=data_co.columns) data_inver_pred_label = data_inver_pred['label'] data_inver_label = data_inver['label'] mae = mean_absolute_error(data_inver_label, data_inver_pred_label) mse = mean_squared_error(data_inver_label, data_inver_pred_label) mape_ = mape(data_inver_label, data_inver_pred_label) r2 = r2_score(data_inver_label, data_inver_pred_label) dict_ = dict(zip([ 'test_r2', 'test_mse', 'test_mae', 'test_mape'], [ r2, mse, mae, mape_, ])) metric_ = pd.DataFrame.from_dict([dict_]) metric_df = pd.concat([metric_df, metric_], axis=0) data_inver_label_df = pd.concat([data_inver_label_df, data_inver_label], axis=1) # data_inver.to_csv(root_ + 'inver' + pred_file) history_df_all_list = [] for history_file in history_file_list: history_df_all = pd.read_csv(root_ + history_file) history_df_all_list.append(history_df_all) # plot_history_df(history_df_all_list, task_name='ablation_time', val_flag='') plot_history_df(history_df_all_list, task_name='of the experimental results of ablation time prediction ', val_flag='val_') 
plt.show() metric_df_all_list = [] metric_file_list = ['metric_ablation_time_enh_10nrun_1Fold.csv', # 'metric_ablation_time_enh_non_10nrun_1Fold.csv', 'metric_ablation_time_vanilla_10nrun_1Fold.csv', # 'metric_ablation_time_vanilla_non_10nrun_1Fold.csv', 'metric_gbm_10nrun_1Fold.csv', # 'metric_gbm_non_10nrun_1Fold.csv', 'metric_lr_10nrun_1Fold.csv', # 'metric_lr_non_10nrun_1Fold.csv', ] for history_file in metric_file_list: history_df_all = pd.read_csv(root_ + history_file) metric_df_all_list.append(history_df_all) # plot_history_df(history_df_all_list, task_name='ablation_time', val_flag='') plot_metric_df(metric_df_all_list, task_name='ablation_time', val_flag='test_') plt.show() pass
/Regression/src/useless/ave_logsit_baseline.py
import pandas as pd import numpy as np from tqdm import tqdm import six import tensorflow as tf from keras import losses from keras import backend as K from keras import optimizers from keras.models import Sequential from keras.layers import Dense from sklearn.preprocessing import LabelEncoder, MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.metrics import mean_absolute_error, mean_squared_error, \ confusion_matrix, precision_score, recall_score, f1_score, r2_score from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression from sklearn.svm import SVR from sklearn.neighbors import KNeighborsRegressor import lightgbm as lgb import matplotlib.pyplot as plt # from deepctr.models import DeepFM, xDeepFM, DCN, WDL # from deepctr.feature_column import SparseFeat, get_feature_names, DenseFeat from preprocess.load_data import load_data_ from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False def mean_absolute_percentage_error(y_true, y_pred): return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 def ctr_model(linear_feature_columns, dnn_feature_columns): adam = tf.keras.optimizers.Adam(lr=0.0001) model = WDL(linear_feature_columns, dnn_feature_columns, task='regression') # model = xDeepFM(linear_feature_columns, dnn_feature_columns, task='regression') model.compile(adam, "huber_loss", metrics=['mae'],) return model def baseline_model(train_set_mix, train_set_mix_label, ca_col, co_col, seed): clf = lgb.LGBMRegressor(max_depth=3, bagging_fraction=0.7, feature_fraction=0.7, reg_alpha=0.5, reg_lambda=0.5, min_child_samples=10, n_estimators=200, learning_rate=1e-1, random_state=seed, ) # clf = lgb.LGBMRegressor(max_depth=4, # bagging_fraction=0.8, # feature_fraction=0.8, # reg_alpha=0.8, # reg_lambda=0.8, # min_child_samples=10, # n_estimators=500, # learning_rate=1e-1, # ) # clf 
= lgb.LGBMRegressor() # clf = LassoCV() # clf = RidgeCV() return clf def run(train_data, test_data, seed, target='label'): np.random.seed(seed) train_data, test_data, co_col, ca_col = data_preprocessing(train_data, test_data, ca_co_sel_flag=False, onehot_flag=False) # train_data, _ = anomaly_dectection(train_data, test_data=pd.DataFrame()) # _, test_data = anomaly_dectection(train_data=pd.DataFrame(), test_data=test_data) # train_data, test_data = anomaly_dectection(train_data=train_data, test_data=test_data) train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \ get_dataset_(train_data, test_data, clean_ratio=clean_ratio, test_retio=test_ratio, val_ratio=val_ratio, seed=seed) # fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=pd.concat([train_set_mix, test_set], axis=0)[feat].nunique(), embedding_dim=4) # for i, feat in enumerate(ca_col)] + [DenseFeat(feat, 1,) # for feat in co_col] # # dnn_feature_columns = fixlen_feature_columns # linear_feature_columns = fixlen_feature_columns # feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns) # train_set_mix = {name: train_set_mix[name].values for name in feature_names} # test_set = {name: test_set[name].values for name in feature_names} # model = ctr_model(linear_feature_columns, dnn_feature_columns,) # history = model.fit(train_set_mix, train_set_mix_label[target].values, # batch_size=512, epochs=180, verbose=1, validation_split=0.2, ) # train_set_mix = train_set_mix.loc[train_set_mix_label['sup_label'] == 0] # train_set_mix_label = train_set_mix_label.loc[train_set_mix_label['sup_label'] == 0] model = baseline_model(train_set_mix, train_set_mix_label, ca_col, co_col, seed) model.fit(train_set_mix, train_set_mix_label[target]) # feat_df = pd.DataFrame({'column': train_set_mix.columns, 'importance': model.feature_importances_.round(5)}) # feat_df_sort = feat_df.sort_values(by='importance', ascending=False) # feat_df_sort_ = 
feat_df_sort.set_index(['column']) # feat_df_sort_[:30].plot.barh(figsize=(15, 15), fontsize=12) # plt.title("n61_lgb_特征重要性") # plt.show() train_target_pred = model.predict(train_set_mix) test_target_pred = model.predict(test_set) train_R2 = r2_score(train_set_mix_label[target], train_target_pred) num_data = train_set_mix.shape[0] num_feat = train_set_mix.shape[1] train_R2_ad = 1 - ((1 - train_R2) * (num_data - 1)) / abs(num_data - num_feat - 1) test_R2 = r2_score(test_set_label[target], test_target_pred) num_data = test_set.shape[0] num_feat = test_set.shape[1] test_R2_ad = 1 - ((1 - test_R2) * (num_data - 1)) / abs(num_data - num_feat - 1) train_mse = mean_squared_error(train_set_mix_label[target], train_target_pred) train_mae = mean_absolute_error(train_set_mix_label[target], train_target_pred) test_mse = mean_squared_error(test_set_label[target], test_target_pred) test_mae = mean_absolute_error(test_set_label[target], test_target_pred) test_mape = mean_absolute_percentage_error(test_set_label[target], test_target_pred.reshape(-1, )) err = test_set_label[target] - np.squeeze(test_target_pred) return [train_R2, test_R2, train_R2_ad, test_R2_ad, train_mse, test_mse, train_mae, test_mae, test_mape] def run_many(train_data, test_data): metric_list_all = [] for trial in tqdm(six.moves.xrange(nrun)): metric_list = run(train_data, test_data, (trial * 2718) % 2020) metric_list_all.append(metric_list) metric_df = pd.DataFrame(np.array(metric_list_all)) metric_df.columns = ['train_R2', 'test_R2', 'train_R2_ad', 'test_R2_ad', 'train_mse', 'test_mse', 'train_mae', 'test_mae', 'test_mape',] for col in metric_df.columns: print('{} {:.4f} ({:.4f}) max: {:.4f} min: {:.4f}'.format(col, metric_df[col].mean(), metric_df[col].std(), metric_df[col].max(), metric_df[col].min())) pass def main(): train_data, test_data = load_data_(datasets_name) run_many(train_data, test_data) pass datasets_name = 'LiverAblation' nrun = 10 clean_ratio = 1 test_ratio = 0.2 val_ratio = 0.2 epoch = 200 
batchsize = 1 iter_ = 1 step_ = 0.1 if __name__ == '__main__': main()
/Regression/src/useless/keras_att.py
import pandas as pd import numpy as np from tqdm import tqdm import six import tensorflow as tf from keras import losses from keras import backend as K from keras import optimizers from keras.models import Sequential, Model from keras.callbacks import EarlyStopping from keras.layers import Input, Dense, Multiply, Activation, Layer, \ GlobalAveragePooling1D, Reshape, RepeatVector, Flatten, Lambda, Add, Embedding from sklearn.preprocessing import LabelEncoder, MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.metrics import mean_absolute_error, mean_squared_error, \ confusion_matrix, precision_score, recall_score, f1_score, r2_score import matplotlib.pyplot as plt from preprocess.load_data import load_data_ from preprocess.get_dataset import get_dataset_, foo, anomaly_dectection class Self_Attention(Layer): def __init__(self, output_dim, **kwargs): self.output_dim = output_dim super(Self_Attention, self).__init__(**kwargs) def build(self, input_shape): # 为该层创建一个可训练的权重 # inputs.shape = (batch_size, time_steps, seq_len) self.kernel = self.add_weight(name='kernel', shape=(3, 1, self.output_dim), initializer='uniform', trainable=True) super(Self_Attention, self).build(input_shape) def call(self, x): x = K.expand_dims(x, axis=2) WQ = K.dot(x, self.kernel[0]) WK = K.dot(x, self.kernel[1]) WV = K.dot(x, self.kernel[2]) print("WQ.shape", WQ.shape) print("K.permute_dimensions(WK, [0, 2, 1]).shape", K.permute_dimensions(WK, [0, 2, 1]).shape) QK = K.batch_dot(WQ, K.permute_dimensions(WK, [0, 2, 1])) QK = QK / (x.shape.as_list()[-1] ** 0.5) QK = K.softmax(QK) print("QK.shape", QK.shape) V = K.batch_dot(QK, WV) return V def compute_output_shape(self, input_shape): return (input_shape[0], input_shape[1], self.output_dim) def mean_absolute_percentage_error(y_true, y_pred): return np.mean(np.abs((y_true - y_pred) / y_true)) * 100 def get_activations(model, inputs, print_shape_only=False, layer_name=None): activations = [] input = model.input if 
layer_name is None: outputs = [layer.output for layer in model.layers] else: outputs = [layer.output for layer in model.layers if layer.name == layer_name] # all layer outputs funcs = [K.function([input] + [K.learning_phase()], [out]) for out in outputs] # evaluation functions layer_outputs = [func([inputs, 1.])[0] for func in funcs] for layer_activations in layer_outputs: activations.append(layer_activations) if print_shape_only: print(layer_activations.shape) else: print(layer_activations) return activations def r2(y_true, y_pred): return 1 - K.sum(K.square(y_pred - y_true))/K.sum(K.square(y_true - K.mean(y_true))) def r_square(y_true, y_pred): SSR = K.mean(K.square(y_pred-K.mean(y_true)), axis=-1) SST = K.mean(K.square(y_true-K.mean(y_true)), axis=-1) return SSR/SST def Att(att_dim, inputs, name): V = inputs QK = Dense(att_dim, bias=None)(inputs) QK = Dense(att_dim, bias=None)(QK) QK = Activation("softmax", name=name)(QK) MV = Multiply()([V, QK]) return(MV) def bulid_model(train_set_mix, train_set_mix_label, ca_col, co_col): input_dim = train_set_mix.shape[-1] inputs = Input(shape=(input_dim,)) atts1 = Att(input_dim, inputs, "attention_vec") x = Dense(64, activation='relu')(atts1) x = Dense(32, activation='relu')(x) x = Dense(16, activation='relu')(x) # atts2 = Att(4, atts2, "attention_vec1") output = Dense(1)(x) model = Model(input=inputs, output=output) return model def Expand_Dim_Layer(tensor): def expand_dim(tensor): return K.expand_dims(tensor, axis=1) return Lambda(expand_dim)(tensor) def bulid_model_atts(train_set_mix, train_set_mix_label, ca_col, co_col): input_dim = train_set_mix.shape[-1] inputs_ = Input(shape=(input_dim,)) # inputs_emb = Embedding(10000, input_dim)(inputs_) atts1 = Self_Attention(input_dim)(inputs_) atts1 = GlobalAveragePooling1D()(atts1) x = Dense(64, activation='relu')(atts1) x = Dense(32, activation='relu')(x) x = Dense(16, activation='relu')(x) outputs = Dense(1)(x) model = Model(inputs=inputs_, outputs=outputs) model.summary() 
return model def run(train_data, test_data, seed, reg_flag=False, label_enh_flag=False, reg_enh_flag=False, target='label'): train_data, test_data, co_col, ca_col = foo(train_data, test_data, ca_co_sel_flag=False, onehot_flag=True) train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \ get_dataset_(train_data, test_data, clean_ratio=clean_ratio, test_retio=test_ratio, seed=seed, val_ratio=val_ratio) train_curr_label = train_set_mix_label[target] test_curr_label = test_set_label[target] model = bulid_model_atts(train_set_mix, train_set_mix_label, ca_col, co_col) rms = optimizers.RMSprop(lr=1e-4) model.compile(optimizer=rms, loss='mean_squared_error', metrics=['mse', 'mae', r2, r_square]) model.fit(train_set_mix, train_curr_label, epochs=epoch, batch_size=batchsize, validation_split=0.2, callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)]) train_target_pred = model.predict(train_set_mix) test_target_pred = model.predict(test_set) num_data = train_set_mix.shape[0] num_feat = train_set_mix.shape[1] train_r2 = r2_score(train_set_mix_label[target], train_target_pred) train_r2_ad = 1 - ((1 - train_r2) * (num_data - 1)) / abs(num_data - num_feat - 1) test_r2 = r2_score(test_set_label[target], test_target_pred) test_r2_ad = 1 - ((1 - test_r2) * (num_data - 1)) / abs(num_data - num_feat - 1) train_mse = mean_squared_error(train_set_mix_label[target], train_target_pred) train_mae = mean_absolute_error(train_set_mix_label[target], train_target_pred) test_mse = mean_squared_error(test_set_label[target], test_target_pred) test_mae = mean_absolute_error(test_set_label[target], test_target_pred) test_mape = mean_absolute_percentage_error(test_set_label[target], test_target_pred.reshape(-1, )) err_enh = test_set_label[target] - np.squeeze(test_target_pred) # attention_vector = get_activations(model, train_set_mix[:1], # print_shape_only=True, # layer_name='attention_vec')[0].flatten() # pd.DataFrame(attention_vector, 
columns=['attention (%)']).plot(kind='bar', # title='Attention Mechanism as a ' # 'function of input dimensions.') # plt.show() return test_r2, test_r2_ad, test_mse def run_many(train_data, test_data): metric_list_all = [] for trial in tqdm(six.moves.xrange(nrun)): # train_metric, test_metric, train_metric_enh, test_metric_enh = \ # run(train_data, test_data, (trial * 2020) % 1000, reg_flag=True, label_enh_flag=True, reg_enh_flag=True) metric_list = run(train_data, test_data, (trial * 2020) % 1000, reg_flag=True, label_enh_flag=True, reg_enh_flag=True) metric_list_all.append(metric_list) metric_df = pd.DataFrame(np.array(metric_list_all)) metric_df.columns = ['train_metric', 'train_metric_enh', 'test_metric', 'test_metric_enh'] for col in metric_df.columns: print('{} metric {:.3f} ({:.3f}) max: {:.3f}'.format(col, metric_df[col].mean(), metric_df[col].std(), metric_df[col].max())) pass def main(): train_data, test_data = load_data_(datasets_name) run_many(train_data, test_data) pass np.random.seed(2020) datasets_name = 'LiverAblation' nrun = 5 clean_ratio = 1 test_ratio = 0.2 val_ratio = 0 epoch = 3000 batchsize = 16 iter_ = 10 step_ = 0.001 if __name__ == '__main__': main()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
shashi/phosphene
refs/heads/master
{"/src/apps/devices/cube.py": ["/src/apps/devices/device.py"], "/src/apps/psychroom.py": ["/src/apps/cube.py", "/src/apps/devices/ledwall.py", "/src/apps/devices/waterfall.py", "/src/apps/devices/discoball.py", "/src/apps/devices/cube.py"], "/src/phosphene/signal.py": ["/src/phosphene/util.py"], "/src/apps/devices/waterfall.py": ["/src/phosphene/graphs.py"], "/src/demo.py": ["/src/phosphene/graphs.py"]}
└── └── src ├── apps │ ├── cube.py │ ├── devices │ │ ├── __init__.py │ │ ├── animations.py │ │ ├── cube.py │ │ ├── cubelib │ │ │ ├── __init__.py │ │ │ └── emulator.py │ │ ├── device.py │ │ ├── discoball.py │ │ ├── ledwall.py │ │ └── waterfall.py │ ├── pathsetup.py │ └── psychroom.py ├── demo.py ├── phosphene │ ├── __init__.py │ ├── audio.py │ ├── dsp.py │ ├── graphs.py │ ├── signal.py │ ├── signalutil.py │ └── util.py └── setup.py
/src/apps/cube.py
from devices.cubelib import emulator from devices.cubelib import mywireframe as wireframe from devices.animations import * pv = emulator.ProjectionViewer(640,480) wf = wireframe.Wireframe() def cubeProcess(cube, signal, count): pv.createCube(wf) start = (0, 0, 0) point = (0,0) #planeBounce(cube,(count/20)%2+1,count%20) #start = wireframeExpandContract(cube,start) #rain(cube,count,5,10) #time.sleep(.1) #point = voxel(cube,count,point) #sine_wave(cube,count) #pyramids(cube,count) #side_waves(cube,count) #fireworks(cube,4) technites(cube,count) cube.redraw(wf, pv) return count + 1
/src/apps/devices/__init__.py
__all__ = ["discoball", "cube", "waterfall"]
/src/apps/devices/animations.py
import numpy import random import time from cubelib import mywireframe from cubelib import emulator # TODO: # shiftPlane(axis, plane, delta) # moves the plane along the axis by delta steps, if it exceeds dimensions, just clear it out, don't rotate. # swapPlanes(axis1, plane1, axis2, plane2) # rain should set random LEDs on the first plane (not a lot of them) # and shift the plane along that axis by one step---Fixed # and shift the plane along that axis by one step # # THINK: # The python code keeps sending a 125 byte string to redraw the # cube as often as it can, this contains 1000 bit values that the MSP # handles. Now, in our code we have been using time.sleep() a lot. # We probably can have a counter that each of these functions uses to # advance its steps, and then increment / decrement that # counter according to music def wireframeCubeCenter(cube,size): if size % 2 == 1: size = size+1 half = size/2 start = cube.dimension/2 - half end = cube.dimension/2 + half - 1 for x in range(0,cube.dimension): for y in range(0,cube.dimension): for z in range(0,cube.dimension): cube.set_led(x,y,z,0) for x in (start,end): for y in (start,end): for z in range(start,end+1): cube.set_led(x,y,z) cube.set_led(x,z,y) cube.set_led(z,x,y) def wireframeCube(cube,START,END): x0,y0,z0 = START x1,y1,z1 = END print "start:",START,"end:",END for x in range(0,cube.dimension): for y in range(0,cube.dimension): for z in range(0,cube.dimension): cube.set_led(x,y,z,0) for x in (x0,x1): for y in (y0,y1): if z0<z1: for z in range(z0,z1+1): cube.set_led(x,y,z) print x,y,z, "set-1st condition" else: for z in range(z1,z0+1): cube.set_led(x,y,z) print x,y,z, "set-2nd condition" for x in (x0,x1): for z in (z0,z1): if y0<y1: for y in range(y0,y1+1): cube.set_led(x,y,z) print x,y,z, "Set - 1st" else: for y in range(y1,y0+1): cube.set_led(x,y,z) print x,y,z, "Set - 2nd" for y in (y0,y1): for z in (z0,z1): if x0<x1: for x in range(x0,x1+1): cube.set_led(x,y,z) print x,y,z, "SET - 1st" else: for x in 
range(x1,x0+1): cube.set_led(x,y,z) print x,y,z, "SET - 2nd" def solidCubeCenter(cube,size): if size % 2 == 1: size = size+1 half = size/2 start = cube.dimension/2 - half end = cube.dimension/2 + half for x in range(0,cube.dimension): for y in range(0,cube.dimension): for z in range(0,cube.dimension): cube.set_led(x,y,z,0) for i in range(start,end): for j in range(start,end): for k in range(start,end): cube.set_led(i,j,k) def solidCube(cube,START,END): x0,y0,z0 = START x1,y1,z1 = END for x in range(0,cube.dimension): for y in range(0,cube.dimension): for z in range(0,cube.dimension): cube.set_led(x,y,z,0) for i in range(x0,x1+1): for j in range(y0,y1+1): for k in range(z0,z1+1): cube.set_led(i,j,k) def setPlane(cube,axis,x,level = 1): plane = level if isinstance(level, int): plane = numpy.array([[level]*10]*10, dtype=bool) if axis == 1: for i in range(0,cube.dimension): for j in range(0,cube.dimension): cube.set_led(x,i,j,plane[i][j]) elif axis == 2: for i in range(0,cube.dimension): for j in range(0,cube.dimension): cube.set_led(i,x,j,plane[i][j]) else: for i in range(0,cube.dimension): for j in range(0,cube.dimension): cube.set_led(i,j,x,plane[i][j]) def shiftPlane(cube,axis,plane,delta): if axis == 1: for i in range(0,cube.dimension): for j in range(0,cube.dimension): try: cube.set_led(plane+delta,i,j,cube.get_led(plane,i,j)) cube.set_led(plane,i,j,0) except: cube.set_led(plane,i,j,0) elif axis == 2: for i in range(0,cube.dimension): for j in range(0,cube.dimension): try: cube.set_led(i,plane+delta,j,cube.get_led(i,plane,j)) cube.set_led(i,plane,j,0) except: cube.set_led(i,plane,j,0) else: for i in range(0,cube.dimension): for j in range(0,cube.dimension): try: cube.set_led(i,j,plane+delta,cube.get_led(i,j,plane)) cube.set_led(i,j,plane,0) except: cube.set_led(i,j,plane,0) #def swapPlane(cube,axis,plane1,plane2): def randPlane(cube,minimum,maximum): array = numpy.array([[0]*cube.dimension]*cube.dimension,dtype = 'bool') for i in range(minimum,maximum): x = 
random.choice([i for i in range(0,cube.dimension)]) y = random.choice([i for i in range(0,cube.dimension)]) array[x][y] = 1 return array def wireframeExpandContract(cube,start=(0,0,0)): (x0, y0, z0) = start for i in range(0,cube.dimension): j = cube.dimension - i - 1 if(x0 == 0): if(y0 == 0 and z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0+i)) elif(y0 == 0): wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0-i)) elif(z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0+i)) else: wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0-i)) else: if(y0 == 0 and z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0+i)) elif(y0 == 0): wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0-i)) elif(z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0+i)) else: wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0-i)) time.sleep(0.1) cube.redraw() max_coord = cube.dimension - 1 corners = [0,max_coord] x0 = random.choice(corners) y0 = random.choice(corners) z0 = random.choice(corners) for j in range(0,cube.dimension): i = cube.dimension - j - 1 if(x0 == 0): if(y0 == 0 and z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0+i)) elif(y0 == 0): wireframeCube(cube,(x0,y0,z0),(x0+i,y0+i,z0-i)) elif(z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0+i)) else: wireframeCube(cube,(x0,y0,z0),(x0+i,y0-i,z0-i)) else: if(y0 == 0 and z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0+i)) elif(y0 == 0): wireframeCube(cube,(x0,y0,z0),(x0-i,y0+i,z0-i)) elif(z0 == 0): wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0+i)) else: wireframeCube(cube,(x0,y0,z0),(x0-i,y0-i,z0-i)) cube.redraw() time.sleep(0.1) return (x0, y0, z0) # return the final coordinate def rain(cube,counter,minimum,maximum,axis=3): shiftCube(cube,3,1) setPlane(cube,axis,9,randPlane(cube,minimum,maximum)) def planeBounce(cube,axis,counter): i = counter%20 if i: if i<10: #to turn off the previous plane setPlane(cube,axis,i-1,0) elif i>10: setPlane(cube,axis,20-i,0) if i<10: setPlane(cube,axis,i) elif i>10: setPlane(cube,axis,19-i) def 
square(cube,size,translate=(0,0)): x0,y0 = translate array = numpy.array([[0]*cube.dimension] * cube.dimension) for i in range(0,size): for j in range(0,size): array[i+x0][j+y0] = 1 return array def distance(point1,point2): x0,y0 = point1 x1,y1 = point2 return numpy.sqrt((x0-x1)**2 + (y0-y1)**2) def circle(cube,radius,translate=(0,0)): x1,y1 = translate array = numpy.array([[0]*cube.dimension] * cube.dimension) for i in range(0,2*radius): for j in range(0,2*radius): if distance((i,j),(radius,radius))<=radius: array[i+x1][j+y1] = 1 return array def wierdshape(cube,diagonal,translate=(0,0)): x1,y1 = translate array = numpy.array([[0]*cube.dimension] * cube.dimension) if diagonal%2 == 0: diagonal-=1 for y in range(0,diagonal): for x in range(0,diagonal): if(y>=diagonal/2): if(x<=diagonal/2): if(x>=y): array[x][y] = 1 else: if(x<=y): array[x][y] = 1 else: if(x<=diagonal/2): if(x+y>=diagonal/2): array[x][y] = 1 else: if(x+y<=diagonal/2): array[x][y] = 1 return array def fillCube(cube,level=1): for x in range(0,cube.dimension): for y in range(0,cube.dimension): for z in range(0,cube.dimension): cube.set_led(x,y,z,level) def voxel(cube,counter,point): x,y = point if(counter==0): fillCube(cube,0) for x in range(0,cube.dimension): for y in range(0,cube.dimension): cube.set_led(x,y,random.choice([0,cube.dimension-1])) if counter%9==0: x = random.choice([i for i in range(0,cube.dimension)]) y = random.choice([i for i in range(0,cube.dimension)]) if cube.get_led(x,y,counter%9)==1: cube.set_led(x,y,counter%9+1) cube.set_led(x,y,counter%9,0) else: cube.set_led(x,y,8-(counter%9)) cube.set_led(x,y,9-(counter%9),0) return (x,y) def shiftCube(cube,axis,delta): for x in range(0,10): for y in range(0,10): for z in range(0,9): if axis == 3: cube.set_led(x,y,z,cube.get_led(x,y,z+delta)) cube.set_led(x,y,z+delta,0) elif axis == 2: cube.set_led(x,z,y,cube.get_led(x,z+delta,y)) cube.set_led(x,y,z+delta,0) elif axis == 1: cube.set_led(z,x,y,cube.get_led(z+delta,x,y)) 
cube.set_led(z+delta,x,y,0) def pyramids(cube,counter,axis = 3): if(counter%20 <cube.dimension): size = counter%10 + 1 setPlane(cube,axis,cube.dimension-1,square(cube,counter%10 + 1,((cube.dimension-counter%10-1)/2,(cube.dimension-counter%10-1)/2))) shiftCube(cube,axis,1) else: size = 9 - (counter-10)%10 translate = (cube.dimension - size)/2 setPlane(cube,axis,cube.dimension-1,square(cube,size,(translate,translate))) shiftCube(cube,axis,1) time.sleep(0) print "counter = ",counter,"size=",size def sine_wave(cube,counter): fillCube(cube,0) center = (cube.dimension-1)/2.0 for x in range(0,cube.dimension): for y in range(0,cube.dimension): dist = distance((x,y),(center,center)) cube.set_led(x,y,int(counter%10+numpy.sin(dist+counter))) def side_waves(cube,counter): fillCube(cube,0) origin_x=4.5; origin_y=4.5; for x in range(0,10): for y in range(0,10): origin_x=numpy.sin(counter); origin_y=numpy.cos(counter); z=int(numpy.sin(numpy.sqrt(((x-origin_x)*(x-origin_x))+((y-origin_y)*(y-origin_y))))+counter%10); cube.set_led(x,y,z); def fireworks(cube,n): origin_x = 3; origin_y = 3; origin_z = 3; #Particles and their position, x,y,z and their movement,dx, dy, dz origin_x = random.choice([i for i in range(0,4)]) origin_y = random.choice([i for i in range(0,4)]) origin_z = random.choice([i for i in range(0,4)]) origin_z +=5; origin_x +=2; origin_y +=2; particles = [[None for _ in range(6)] for _ in range(n)] print particles #shoot a particle up in the air value was 600+500 for e in range(0,origin_z): cube.set_led(origin_x,origin_y,e,1); time.sleep(.05+.02*e); cube.redraw() fillCube(cube,0) for f in range(0,n): #Position particles[f][0] = origin_x particles[f][1] = origin_y particles[f][2] = origin_z rand_x = random.choice([i for i in range(0,200)]) rand_y = random.choice([i for i in range(0,200)]) rand_z = random.choice([i for i in range(0,200)]) try: #Movement particles[f][3] = 1-rand_x/100.0 #dx particles[f][4] = 1-rand_y/100.0 #dy particles[f][5] = 1-rand_z/100.0 #dz except: 
print "f:",f #explode for e in range(0,25): slowrate = 1+numpy.tan((e+0.1)/20)*10 gravity = numpy.tan((e+0.1)/20)/2 for f in range(0,n): particles[f][0] += particles[f][3]/slowrate particles[f][1] += particles[f][4]/slowrate particles[f][2] += particles[f][5]/slowrate; particles[f][2] -= gravity; cube.set_led(int(particles[f][0]),int(particles[f][1]),int(particles[f][2])) time.sleep(1000) def T(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for i in range(3,7): for j in range(3,10): plane[i][j] = 1 return plane def E(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(4,7): plane[i][j] = 1 for j in range(8,10): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 return plane def B(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,2): plane[i][j] = 1 for j in range(4,6): plane[i][j] = 1 for j in range(8,10): plane[i][j] = 1 for j in range(0,10): for i in range(0,3): plane[i][j] = 1 for i in range(7,10): plane[i][j] = 1 plane[9][0] = 0 plane[9][9] = 0 return plane def A(): plane = numpy.array([[0]*10] *10) for i in range(0,10): for j in range(0,2): plane[i][j] = 1 for j in range(4,7): plane[i][j] = 1 for j in range(0,10): for i in range(0,3): plane[i][j] = 1 for i in range(7,10): plane[i][j] = 1 return plane def C(): plane = numpy.array([[0]*10] *10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(7,10): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 return plane def D(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,2): plane[i][j] = 1 for j in range(8,10): plane[i][j] = 1 for j in range(0,10): for i in range(0,2): plane[i][j] = 1 for i in range(8,10): plane[i][j] = 1 plane[9][0] = 0 plane[9][9] = 0 return plane def F(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in 
range(4,7): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 return plane def H(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(4,7): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 for i in range(7,10): for j in range(0,10): plane[i][j] = 1 return plane def G(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(7,10): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 for i in range(7,10): for j in range(4,10): plane[i][j] = 1 for i in range(4,10): for j in range(4,6): plane[i][j] = 1 return plane def J(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for i in range(3,7): for j in range(3,10): plane[i][j] = 1 for i in range(0,3): for j in range(7,10): plane[i][j] = 1 return plane def K(): plane = numpy.array([[0]*10]*10) for j in range(0,10): for i in range(0,2): plane[i][j] = 1 for i in range(0,10): for j in range(0,10): if(i == j): plane[i][5+j/2] = 1 try: plane[i-1][4+j/2] = 1 plane[i+1][4+j/2] = 1 except: print "Blaaah" if(i+j==9): plane[i][j/2] = 1 try: plane[i-1][j/2] = 1 plane[i+1][j/2] = 1 except: print "Blaaah" plane[9][5] = 0 plane[9][4] = 0 return plane def L(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(7,10): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 return plane def M(): plane = numpy.array([[0]*10] * 10) for i in range(0,2): for j in range(0,10): plane[i][j] = 1 for i in range(8,10): for j in range(0,10): plane[i][j] = 1 #for i in range(4,7): #for j in range(0,10): # plane[i][j] = 1 for i in range(0,10): for j in range(0,10): if(i == j): plane[i/2][j] = 1 try: plane[i/2][j-1] = 1 plane[i/2][j+1] = 1 except: print "Blaaah" if(i+j==9): plane[5 + i/2][j] = 1 try: plane[5+i/2][j-1] = 1 plane[5+i/2][j+1] = 1 except: print "Blaaah" return plane def N(): plane = numpy.array([[0]*10] * 10) 
for i in range(0,3): for j in range(0,10): plane[i][j] = 1 for i in range(7,10): for j in range(0,10): plane[i][j] = 1 for i in range(0,10): for j in range(0,10): if(i == j): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" return plane def O(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(7,10): plane[i][j] = 1 for j in range(0,10): for i in range(0,3): plane[i][j] = 1 for i in range(7,10): plane[i][j] = 1 return plane def P(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,2): plane[i][j] = 1 for j in range(4,7): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 for i in range(7,10): for j in range(0,4): plane[i][j] = 1 return plane def Q(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,2): plane[i][j] = 1 for j in range(8,10): plane[i][j] = 1 for j in range(0,10): for i in range(0,2): plane[i][j] = 1 for i in range(8,10): plane[i][j] = 1 for i in range(5,10): for j in range(5,10): if(i == j): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" return plane def R(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(4,6): plane[i][j] = 1 for i in range(0,3): for j in range(0,10): plane[i][j] = 1 for i in range(7,10): for j in range(0,4): plane[i][j] = 1 for i in range(0,10): for j in range(0,10): if(i == j): plane[i][5+j/2] = 1 try: plane[i-1][4+j/2] = 1 plane[i+1][4+j/2] = 1 except: print "Blaaah" return plane def I(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(7,10): plane[i][j] = 1 for i in range(3,7): for j in range(3,10): plane[i][j] = 1 return plane def S(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(4,7): plane[i][j] = 1 for j in range(8,10): plane[i][j] = 1 for i in 
range(0,3): for j in range(0,7): plane[i][j] = 1 for i in range(7,10): for j in range(4,10): plane[i][j] = 1 return plane def U(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(7,10): plane[i][j] = 1 for j in range(0,10): for i in range(0,3): plane[i][j] = 1 for i in range(7,10): plane[i][j] = 1 return plane def V(): plane = numpy.array([[0]*10] * 10) for i in range(0,10): for j in range(0,10): if(i == j): plane[i/2][j] = 1 try: plane[i/2][j-1] = 1 plane[i/2][j+1] = 1 except: print "Blaaah" if(i+j==9): plane[5 + i/2][j] = 1 try: plane[5+i/2][j-1] = 1 plane[5+i/2][j+1] = 1 except: print "Blaaah" plane[0][9] = 0 plane[9][9] = 0 return plane def W(): plane = numpy.array([[0]*10] * 10) for i in range(0,2): for j in range(0,10): plane[i][j] = 1 for i in range(8,10): for j in range(0,10): plane[i][j] = 1 #for i in range(4,7): #for j in range(0,10): # plane[i][j] = 1 for i in range(0,10): for j in range(0,10): if(i == j): plane[5+i/2][j] = 1 try: plane[5+i/2][j+2] = 1 plane[5+i/2][j+1] = 1 except: print "Blaaah" if(i+j==9): plane[i/2][j] = 1 try: plane[i/2][j+2] = 1 plane[i/2][j+1] = 1 except: print "Blaaah" return plane def X(): plane = numpy.array([[0]*10]*10) for i in range(0,10): for j in range(0,10): if(i == j): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" if(i+j == 9): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" return plane def Y(): plane = numpy.array([[0]*10]*10) for i in range(0,10): for j in range(0,5): if(i == j): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" if(i+j == 9): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" for i in range(4,6): for j in range(5,10): plane[i][j] = 1 plane[0][9] = 0 plane[0][0] = 0 return plane def Z(): plane = numpy.array([[0]*10]*10) for i in range(0,10): for j in range(0,10): if(i+j == 9): plane[i][j] = 1 try: plane[i][j-1] = 1 plane[i][j+1] = 1 except: print "Blaaah" 
for i in range(0,10): for j in range(0,3): plane[i][j] = 1 for j in range(7,10): plane[i][j] = 1 return plane def stringPrint(cube,string,counter=0,axis = 3): if counter%10 ==0: fillCube(cube,0) i = string[(counter/10)%len(string)] if i == 'A': setPlane(cube,axis,9,A()) elif i == 'B': setPlane(cube,axis,9,B()) elif i == 'C': setPlane(cube,axis,9,C()) elif i == 'D': setPlane(cube,axis,9,D()) elif i == 'E': setPlane(cube,axis,9,E()) elif i == 'F': setPlane(cube,axis,9,F()) elif i == 'G': setPlane(cube,axis,9,G()) elif i == 'H': setPlane(cube,axis,9,H()) elif i == 'I': setPlane(cube,axis,9,I()) elif i == 'J': setPlane(cube,axis,9,J()) elif i == 'K': setPlane(cube,axis,9,K()) elif i == 'L': setPlane(cube,axis,9,L()) elif i == 'M': setPlane(cube,axis,9,M()) elif i == 'N': setPlane(cube,axis,9,N()) elif i == 'O': setPlane(cube,axis,9,O()) elif i == 'P': setPlane(cube,axis,9,P()) elif i == 'Q': setPlane(cube,axis,9,Q()) elif i == 'R': setPlane(cube,axis,9,R()) elif i == 'S': setPlane(cube,axis,9,S()) elif i == 'T': setPlane(cube,axis,9,T()) elif i == 'U': setPlane(cube,axis,9,U()) elif i == 'V': setPlane(cube,axis,9,V()) elif i == 'W': setPlane(cube,axis,9,W()) elif i == 'X': setPlane(cube,axis,9,X()) elif i == 'Y': setPlane(cube,axis,9,Y()) elif i == 'Z': setPlane(cube,axis,9,Z()) else: shiftCube(cube,axis,1) def stringfly(cube,axis): shiftCube(cube,axis,1) def technites(cube,counter,axis = 3): alpha = counter/9 if(counter%90 == 0): fillCube(cube,0) setPlane(cube,axis,9,T(cube)) elif(counter%90 == 10): fillCube(cube,0) setPlane(cube,axis,9,E(cube)) elif(counter%90 == 20): fillCube(cube,0) setPlane(cube,axis,9,C(cube)) elif(counter%90 == 30): fillCube(cube,0) setPlane(cube,axis,9,H(cube)) elif(counter%90 == 40): fillCube(cube,0) setPlane(cube,axis,9,N(cube)) elif(counter%90 == 50): fillCube(cube,0) setPlane(cube,axis,9,I(cube)) elif(counter%90 == 60): fillCube(cube,0) setPlane(cube,axis,9,T(cube)) elif(counter%90 == 70): fillCube(cube,0) setPlane(cube,axis,9,E(cube)) 
elif(counter%90 == 80): fillCube(cube,0) setPlane(cube,axis,9,S(cube)) else: stringfly(cube,axis) def moveFaces(cube): Z0 = numpy.array([[0]*cube.dimension]*cube.dimension) Z9 = numpy.array([[0]*cube.dimension]*cube.dimension) X0 = numpy.array([[0]*cube.dimension]*cube.dimension) X9 = numpy.array([[0]*cube.dimension]*cube.dimension) for i in range(1,cube.dimension): for j in range(0,cube.dimension): X0[i-1][j] = cube.get_led(i,j,0) for j in range(0,cube.dimension): X0[9][j] = cube.get_led(9,j,0) for i in range(0,cube.dimension-1): for j in range(0,cube.dimension): Z0[i+1][j] = cube.get_led(0,j,i) for j in range(0,cube.dimension): Z0[0][j] = cube.get_led(0,j,0) for i in range(0,cube.dimension-1): for j in range(0,cube.dimension): X9[i+1][j] = cube.get_led(i,j,9) for j in range(0,cube.dimension): X9[0][j] = cube.get_led(0,j,9) for i in range(1,cube.dimension): for j in range(0,cube.dimension): Z9[i-1][j] = cube.get_led(9,j,i) for j in range(0,cube.dimension): Z9[9][j] = cube.get_led(9,j,9) fillCube(cube,0) setPlane(cube,3,0,X0) setPlane(cube,1,0,Z0) setPlane(cube,3,9,X9) setPlane(cube,1,9,Z9)
/src/apps/devices/cube.py
import serial import numpy import math from device import Device from cubelib import emulator from cubelib import mywireframe as wireframe from animations import * import time import threading # A class for the cube class Cube(Device): def __init__(self, port, dimension=10, emulator=False): Device.__init__(self, "Cube", port) self.array = numpy.array([[\ [0]*dimension]*dimension]*dimension, dtype='bool') self.dimension = dimension self.emulator = emulator self.name = "Cube" def set_led(self, x, y, z, level=1): self.array[x][y][z] = level def get_led(self, x, y, z): return self.array[x][y][z] def takeSignal(self, signal): pass def toByteStream(self): # 104 bits per layer, first 4 bits waste. bytesPerLayer = int(math.ceil(self.dimension**2 / 8.0)) print bytesPerLayer discardBits = bytesPerLayer * 8 - self.dimension**2 print discardBits bts = bytearray(bytesPerLayer*self.dimension) pos = 0 mod = 0 for layer in self.array: mod = discardBits for row in layer: for bit in row: if bit: bts[pos] |= 1 << mod else: bts[pos] &= ~(1 << mod) mod += 1 if mod == 8: mod = 0 pos += 1 return bts def redraw(self, wf=None, pv=None): if self.emulator: wf.setVisible(emulator.findIndexArray(self.array)) pv.run() if __name__ == "__main__": cube = Cube("/dev/ttyACM0") #pv = emulator.ProjectionViewer(640,480) #wf = wireframe.Wireframe() #pv.createCube(wf) count = 0 start = (0, 0, 0) point = (0,0) #fillCube(cube,0) #cube.redraw() #time.sleep(100) def sendingThread(): while True: cube.port.write("S") bs = cube.toByteStream() for i in range(0, 130): time.sleep(0.01) cube.port.write(chr(bs[i])) print "wrote", bs[i] assert(cube.port.read() == '.') t = threading.Thread(target=sendingThread) t.start() #fillCube(cube,0) #cube.set_led(9,9,9) #for x in range(0, 9): # for y in range(0, 9): # for z in range(0, 9): # cube.set_led(x, y, z, 1) # time.sleep(1) while True: #wireframeCube(cube,(1,1,1),(9,9,9)) fillCube(cube, 1) #planeBounce(cube,(count/20)%2+1,count%20) #planeBounce(cube,1,count) #start = 
wireframeExpandContract(cube,start) #rain(cube,count,5,10) #time.sleep(.1) #point = voxel(cube,count,point) #sine_wave(cube,count) #pyramids(cube,count) #side_waves(cube,count) #fireworks(cube,4) #technites(cube, count) #setPlane(cube,1,(counter/100)%10,1) #setPlane(cube,2,0,1) #stringPrint(cube,'TECHNITES',count) #moveFaces(cube) #cube.set_led(0,0,0) #cube.set_led(0,0,1) cube.redraw() count += 1 time.sleep(0.1)
/src/apps/devices/cubelib/__init__.py
__all__ = ["emulator", "mywireframe"]
/src/apps/devices/cubelib/emulator.py
#!/bin/env python #using the wireframe module downloaded from http://www.petercollingridge.co.uk/ import mywireframe as wireframe import pygame from pygame import display from pygame.draw import * import time import numpy key_to_function = { pygame.K_LEFT: (lambda x: x.translateAll('x', -10)), pygame.K_RIGHT: (lambda x: x.translateAll('x', 10)), pygame.K_DOWN: (lambda x: x.translateAll('y', 10)), pygame.K_UP: (lambda x: x.translateAll('y', -10)), pygame.K_EQUALS: (lambda x: x.scaleAll(1.25)), pygame.K_MINUS: (lambda x: x.scaleAll( 0.8)), pygame.K_q: (lambda x: x.rotateAll('X', 0.1)), pygame.K_w: (lambda x: x.rotateAll('X', -0.1)), pygame.K_a: (lambda x: x.rotateAll('Y', 0.1)), pygame.K_s: (lambda x: x.rotateAll('Y', -0.1)), pygame.K_z: (lambda x: x.rotateAll('Z', 0.1)), pygame.K_x: (lambda x: x.rotateAll('Z', -0.1))} class ProjectionViewer: """ Displays 3D objects on a Pygame screen """ def __init__(self, width, height): self.width = width self.height = height self.screen = pygame.display.set_mode((width, height)) pygame.display.set_caption('Wireframe Display') self.background = (10,10,50) self.wireframes = {} self.displayNodes = True self.displayEdges = True self.nodeColour = (255,255,255) self.edgeColour = (200,200,200) self.nodeRadius = 3 #Modify to change size of the spheres def addWireframe(self, name, wireframe): """ Add a named wireframe object. """ self.wireframes[name] = wireframe def run(self): for event in pygame.event.get(): if event.type == pygame.KEYDOWN: if event.key in key_to_function: key_to_function[event.key](self) self.display() pygame.display.flip() def display(self): """ Draw the wireframes on the screen. 
""" self.screen.fill(self.background) for wireframe in self.wireframes.values(): if self.displayEdges: for edge in wireframe.edges: pygame.draw.aaline(self.screen, self.edgeColour, (edge.start.x, edge.start.y), (edge.stop.x, edge.stop.y), 1) if self.displayNodes: for node in wireframe.nodes: if node.visiblity: pygame.draw.circle(self.screen, self.nodeColour, (int(node.x), int(node.y)), self.nodeRadius, 0) def translateAll(self, axis, d): """ Translate all wireframes along a given axis by d units. """ for wireframe in self.wireframes.itervalues(): wireframe.translate(axis, d) def scaleAll(self, scale): """ Scale all wireframes by a given scale, centred on the centre of the screen. """ centre_x = self.width/2 centre_y = self.height/2 for wireframe in self.wireframes.itervalues(): wireframe.scale((centre_x, centre_y), scale) def rotateAll(self, axis, theta): """ Rotate all wireframe about their centre, along a given axis by a given angle. """ rotateFunction = 'rotate' + axis for wireframe in self.wireframes.itervalues(): centre = wireframe.findCentre() getattr(wireframe, rotateFunction)(centre, theta) def createCube(self,cube,X=[50,140], Y=[50,140], Z=[50,140]): cube.addNodes([(x,y,z) for x in X for y in Y for z in Z]) #adding the nodes of the cube framework. allnodes = [] cube.addEdges([(n,n+4) for n in range(0,4)]+[(n,n+1) for n in range(0,8,2)]+[(n,n+2) for n in (0,1,4,5)]) #creating edges of the cube framework. for i in range(0,10): for j in range(0,10): for k in range(0,10): allnodes.append((X[0]+(X[1]-X[0])/9 * i,Y[0]+(Y[1] - Y[0])/9 * j,Z[0] + (Z[1]-Z[0])/9 * k)) cube.addNodes(allnodes) #cube.outputNodes() self.addWireframe('cube',cube) def findIndex(coords): #Send coordinates of the points you want lit up. Will convert to neede indices = [] for nodes in coords: x,y,z = nodes index = x*100+y*10+z + 8 indices.append(index) return indices def findIndexArray(array): #Takes a 3-D numpy array containing bool of all the LED points. 
indices = [] for i in range(0,10): for j in range(0,10): for k in range(0,10): if(array[i][j][k] == 1): index = i*100+j*10+ k + 8 indices.append(index) return indices def wireframecube(size): if size % 2 == 1: size = size+1 half = size/2 start = 5 - half end = 5 + half - 1 cubecords = [(x,y,z) for x in (start,end) for y in (start,end) for z in range(start,end+1)]+[(x,z,y) for x in (start,end) for y in (start,end) for z in range(start,end+1)] + [(z,y,x) for x in (start,end) for y in (start,end) for z in range(start,end+1)] return cubecords def cubes(size): if size % 2 == 1: size = size+1 half = size/2 cubecords = [] for i in range(0,size): for j in range(0,size): for k in range(0,size): cubecords.append((5-half+i,5-half+j,5-half+k)) return cubecords if __name__ == '__main__': pv = ProjectionViewer(400, 300) allnodes =[] cube = wireframe.Wireframe() #storing all the nodes in this wireframe object. X = [50,140] Y = [50,140] Z = [50,140] pv.createCube(cube,X,Y,Z) YZface = findIndex((0,y,z) for y in range(0,10) for z in range(0,10)) count = 0 for k in range(1,150000): if k%5000 ==2500: count = (count+2)%11 cube.setVisible(findIndex(wireframecube(count))) pv.run()
/src/apps/devices/device.py
import serial import numpy from threading import Thread class Device: def __init__(self, name, port): self.array = [] try: self.port = serial.Serial(port) self.isConnected = True print "Connected to", name except Exception as e: self.port = None self.isConnected = False print "Error connecting to", name, e def setupSignal(self, signal): pass def graphOutput(self, signal): pass def truncate(self, array): return numpy.array([min(int(i), 255) for i in array]) def toByteStream(self, array): return [chr(i) for i in self.truncate(array)] def readAck(self): print self.port.read(size=1) # Read the acknowledgement def redraw(self): if self.isConnected: self.port.write(self.toByteStream()) self.port.read(size=1) #Acknowledgement else: #print "Connection to %s lost!" % self.name pass def isUnresponsive(self): print "%s is not responding! Stopping to communicate." self.isConnected = False
/src/apps/devices/discoball.py
import device from phosphene.signal import * from phosphene.signalutil import * from phosphene.graphs import * class DiscoBall(device.Device): def __init__(self, port): device.Device.__init__(self, "DiscoBall", port) def setupSignal(self, signal): signal.discoball = lift(lambda s: numpymap(lambda (a, b): 1 if a > b * 1.414 else 0, zip(s.avg12, s.longavg12))) def graphOutput(self, signal): return boopGraph(signal.discoball[:4]) def redraw(self, signal): data = self.truncate(signal.discoball[:4] * 255) print data self.port.write(self.toByteStream(data))
/src/apps/devices/ledwall.py
import device from phosphene.signal import * from phosphene.signalutil import * from phosphene.graphs import * class LEDWall(device.Device): def __init__(self, port): device.Device.__init__(self, "LEDWall", port) def setupSignal(self, signal): CHANNELS = 6 val = lambda s: [max(0, scipy.log(s.avg3[0]+1)) - scipy.log(s.longavg3[0]+1)] signal.avg1Falling = fallingMax(val) def f(s): n = int(min(6, max(0, val(s)[0] * CHANNELS / (s.avg1Falling[0] if s.avg1Falling[0] > 0.01 else 1)))) return [1 for i in range(0, n)] + [0 for i in range(0, 6-n)] signal.ledwall = lift(f) def graphOutput(self, signal): return None def redraw(self, signal): print "LEDWall", self.toByteStream(signal.ledwall) self.port.write(self.toByteStream(signal.ledwall))
/src/apps/devices/waterfall.py
import device from phosphene.signal import * import scipy, numpy from phosphene.graphs import barGraph class Waterfall(device.Device): def __init__(self, port): device.Device.__init__(self, "Waterfall", port) def setupSignal(self, signal): def waterfall(s): lights = [s.avg8[i] * 150 / max(0.5, s.longavg8[i]) \ for i in range(0, 8)] fans = [2*i for i in lights] lights.reverse() return lights + fans signal.waterfall = lift(waterfall) def graphOutput(self, signal): return barGraph(self.truncate(signal.waterfall) / 255.0) def redraw(self, signal): payload = self.toByteStream(signal.waterfall) self.port.write(payload)
/src/apps/pathsetup.py
import os, sys dirname = os.path.dirname here = os.path.abspath(__file__) parentdir = dirname(dirname(here)) sys.path.append(parentdir)
/src/apps/psychroom.py
# # This script plays an mp3 file and communicates via serial.Serial # with devices in the Technites psychedelic room to visualize the # music on them. # # It talks to 4 devices # WaterFall -- tubes with LEDs and flying stuff fanned to music # DiscoBall -- 8 60 watt bulbs wrapped in colored paper # LEDWall -- a 4 channel strip of LED # this time it was the LED roof instead :p # LEDCube -- a 10x10x10 LED cube - work on this is still on # # the script also has a sloppy pygame visualization of the fft and # beats data # import sys import time import scipy import pygame from pygame import display from pygame.draw import * import pathsetup # this module sets up PYTHONPATH for all this to work from devices.discoball import DiscoBall from devices.waterfall import Waterfall from devices.ledwall import LEDWall from devices.cube import Cube import phosphene from phosphene import audio, signalutil, util from phosphene.util import * from phosphene.signal import * from phosphene.dsp import * from phosphene.graphs import * from phosphene.signalutil import * from cube import cubeProcess #from phosphene import cube from threading import Thread # Setup devices with their corresponding device files devs = [ Waterfall("/dev/ttyACM0"), DiscoBall("/dev/ttyACM1"), LEDWall("/dev/ttyACM2") ] pygame.init() surface = display.set_mode((640, 480)) if len(sys.argv) < 2: print "Usage: %s file.mp3" % sys.argv[0] sys.exit(1) else: fPath = sys.argv[1] sF, data = audio.read(fPath) import serial signal = Signal(data, sF) signal.A = lift((data[:,0] + data[:,1]) / 2, True) for d in devs: d.setupSignal(signal) def devices(s): #threads = [] for d in devs: if d.isConnected: def f(): d.redraw(s) d.readAck() #t = Thread(target=f) #threads.append(t) #t.start() f() #for t in threads: # t.join(timeout=2) # if t.isAlive(): # d.isUnresponsive() surface.fill((0, 0, 0)) graphsGraphs(filter( lambda g: g is not None, [d.graphOutput(signal) for d in devs]))(surface, (0, 0, 640, 480)) CubeState = lambda: 0 
CubeState.count = 0 #cube = Cube("/dev/ttyACM1", emulator=True) def cubeUpdate(signal): CubeState.count = cubeProcess(cube, signal, CubeState.count) def graphsProcess(s): display.update() processes = [graphsProcess, devices] #, cube.emulator] signal.relthresh = 1.66 soundObj = audio.makeSound(sF, data) # make a pygame Sound object from the data # run setup on the signal signalutil.setup(signal) soundObj.play() # start playing it. This is non-blocking perceive(processes, signal, 90) # perceive your signal.
/src/demo.py
import sys import pdb import pygame from pygame import display from pygame.draw import * import scipy import time from phosphene import audio, util, signalutil, signal from phosphene.graphs import barGraph, boopGraph, graphsGraphs from threading import Thread if len(sys.argv) < 2: print "Usage: %s file.mp3" % sys.argv[0] sys.exit(1) else: fPath = sys.argv[1] # initialize PyGame SCREEN_DIMENSIONS = (640, 480) pygame.init() surface = display.set_mode(SCREEN_DIMENSIONS) sF, data = audio.read(fPath) sig = signal.Signal(data, sF) sig.A = signal.lift((data[:,0] + data[:,1]) / 2, True) def beats(s): """ Extract beats in the signal in 4 different frequency ranges """ # quick note: s.avg4 is a decaying 4 channel fft # s.longavg4 decays at a slower rate # beat detection huristic: # beat occured if s.avg4 * threshold > s.longavg4 threshold = 1.7 return util.numpymap( lambda (x, y): 1 if x > threshold * y else 0, zip(s.avg4 * threshold, s.longavg4)) # Lift the beats sig.beats = signal.lift(beats) # not sure if this can be called sustain. # blend gives a decay effect sig.sustain = signalutil.blend(beats, 0.7) def graphsProcess(s): # clear screen surface.fill((0, 0, 0)) # draw a decaying fft differential and the beats in the full # pygame window. graphsGraphs([ barGraph(s.avg12rel / 10), boopGraph(s.beats), boopGraph(s.sustain) ])(surface, (0, 0) + SCREEN_DIMENSIONS) # affect the window display.update() def repl(): """ call this function to give you a pdb shell while the program is running. You will be dropped in the current context. """ def replFunc(): pdb.set_trace() replThread = Thread(target=replFunc) replThread.start() #repl() # apply utility "lift"s -- this sets up signal.avgN and longavgN variables signalutil.setup(sig) soundObj = audio.makeSound(sF, data) # make a pygame Sound object from the data soundObj.play() # start playing it. This is non-blocking # perceive signal at 90 fps (or lesser when not possible) signal.perceive([graphsProcess], sig, 90)
/src/phosphene/__init__.py
# Submodules re-exported by `from phosphene import *`.
# FIX: `signalutil` was missing from this list even though it is part
# of the public API used by the demo scripts.
__all__ = ["audio", "dsp", "signal", "graphs", "util", "signalutil"]
/src/phosphene/audio.py
import os
import subprocess
from hashlib import sha1

import scipy.io.wavfile as wav
import pygame.mixer
from pygame.sndarray import make_sound

# Set mixer defaults
pygame.mixer.pre_init(44100, 16, 2, 4096)

__all__ = ["read", "makeSound"]

def digest(string):
    """ Hex SHA-1 of `string` -- used to derive a stable temp-file
        name for the decoded wav. """
    return sha1(string).hexdigest()

def read(fname):
    """ Reads an audio file into a numpy array.

        returns frequency, samples """
    # this is an ugly way to read mp3. But works well.
    # www.snip2code.com/Snippet/1767/Convert-mp3-to-numpy-array--Ugly--but-it
    suffix = digest(fname)[0:6]
    oname = '/tmp/tmp' + suffix + '.wav'

    # ask lame to decode it to a wav file
    if not os.path.exists(oname):
        # Well, if you ctrl-c before conversion, you're going to
        # have to manually delete the file.
        # FIX: use an argument list instead of the old
        # os.system('lame --decode "%s" "%s"') -- the shell-string
        # form broke on (and allowed injection via) file names
        # containing quotes or shell metacharacters. The non-zero
        # exit status is still ignored, matching the old best-effort
        # behaviour.
        subprocess.call(['lame', '--decode', fname, oname])

    # now read using scipy.io.wavfile
    data = wav.read(oname)

    # return samplingFrequency, samples
    return data[0], data[1]

def makeSound(samplingFreq, data):
    """ Make a Player object from raw data

        returns a pygame.mixer.Sound object """
    # Ugh! impurity: re-initializes the global mixer at the file's rate
    pygame.mixer.init(frequency=samplingFreq)
    return make_sound(data)
/src/phosphene/dsp.py
import scipy
import numpy

from util import *

def fftIdx(Fs, Hz, n):
    # Map a frequency (Hz) to an FFT bin index for an n-point FFT at
    # sampling rate Fs.
    # NOTE(review): the conventional bin index is round(Hz * n / Fs);
    # this computes round(Fs / n * Hz), which looks inverted. Nothing
    # in this package appears to call it -- confirm before relying on
    # it.
    assert(Hz <= Fs / 2);
    return round(Fs / n * Hz)

# memoized variant -- fftIdx is pure, so cache by argument tuple
memFftIdx = memoize(fftIdx)

def getNotes():
    # Split points (in Hz): 16.35 Hz is C0 and each successive entry
    # is one semitone (factor 2**(1/12)) higher, followed by two
    # coarse top bins.
    return [0] \
        + [16.35 * pow(2, i/12.0) + 1 for i in range(0, 101)] \
        + [11050, 22100]

def group(n, fft, grouping=lambda i: i):
    """ Put fft data into n bins by adding them.
        grouping function defines how things are grouped
        lambda i: i --> linear grouping
        lambda i: 2 ** i --> logarithmic
    """
    # `n` may also be an explicit list/tuple of split points, in which
    # case the grouping function is ignored.
    if isinstance(n, (list,tuple)):
        splitPoints = numpy.array(n, dtype=float)
        n = len(n) - 1
    elif hasattr(grouping, '__call__'):
        splitPoints = numpy.array([grouping(i) for i in range(0, n + 1)], \
            dtype=float)
    l = len(fft)
    # scale the split points onto [0, len(fft)] and truncate to ints
    splitIdx = splitPoints / abs(max(splitPoints)) * l
    splitIdx = [int(i) for i in splitIdx]
    #pdb.set_trace()
    return numpy.array(
        [sum(fft[splitIdx[i-1]:splitIdx[i]]) for i in range(1, n + 1)])

def fft(samples, out_n, env=None, eq=None):
    """ Returns the short time FFT at i, window width will be
        1.5 * delta 1 * delta after i and 0.5 * delta before """
    # Magnitude spectrum, optionally windowed with Hamming * half-sine
    # envelope first. Only the positive-frequency half is returned.
    in_n = len(samples)
    if env:
        spectrum = abs(scipy.fft(samples * scipy.hamming(in_n) * envelope(in_n)))
    else:
        spectrum = abs(scipy.fft(samples))
    # NOTE(review): the float slice bounds (0.9*in_n/2, in_n/2) rely
    # on old numpy accepting non-integer indices; modern numpy raises.
    if out_n:
        # group the lower 90% of the half-spectrum into out_n bins,
        # optionally equalized
        if eq:
            return group(out_n, spectrum[0:0.9*in_n/2]) * equalize(out_n)
        else:
            return group(out_n, spectrum[0:0.9*in_n/2])
    else:
        if eq:
            return spectrum[0:in_n/2] * equalize(in_n/2)
        else:
            return spectrum[0:in_n/2]

def equalize(N, scale=-0.02):
    # Log-shaped gain curve over N bins; memoized below since it only
    # depends on its arguments.
    f = lambda i: scale * scipy.log((N-i) * 1.0/N)
    return numpymap(f, range(0, N))
equalize=memoize(equalize)

def envelope(N, power=1):
    # Half-sine window of length N raised to `power`; memoized below.
    mult = scipy.pi / N
    f = lambda i: pow(0.5 + 0.5 * scipy.sin(i*mult - scipy.pi / 2), power)
    return numpymap(f, range(0, N))
envelope=memoize(envelope)
/src/phosphene/graphs.py
import pdb
import scipy
import numpy
import pygame
from pygame import display
from pygame.draw import *
from pygame import Color
import math

def barGraph(data):
    """ drawing contains (x, y, width, height) """
    # Returns a closure drawing one red bar per element of `data`
    # (values expected in 0..1 territory; taller value, taller bar).
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        # NOTE(review): the bare except + pdb.set_trace() pairs below
        # are debugger traps left in; they block the process on any
        # drawing error.
        try:
            l = len(data)
        except:
            pdb.set_trace()
        w = W / l
        try:
            for i in range(0, l):
                h = data[i]
                c = Color(0, 0, 0, 0)
                c.hsva = (0, 100, 100, 0) # pure red via HSV
                x = x0 + i * w
                y = y0 + H * (1 - h)
                rect(surface, c, \
                    (x, y, 0.9 * w, h * H))
        except:
            pdb.set_trace()
    return f

def boopGraph(data):
    # Returns a closure drawing one centered white square per element;
    # the square's size scales with the element's value.
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            pdb.set_trace()
        dx = W / l
        try:
            for i in range(0, l):
                d = data[i]
                a = dx * d
                x = (dx - a) / 2 + i * dx + x0
                y = (H - dx) / 2 + (dx - a) / 2 + y0
                c = Color(255, 255, 255, 255)
                rect(surface, c, \
                    (x, y, a, a))
        except:
            pdb.set_trace()
    return f

def circleRays(surface, center, data, transform=lambda y: scipy.log(y + 1)):
    # Draw `data` as hue-coded rays radiating from `center`, plus a
    # central circle. `transform` compresses the dynamic range.
    x0, y0 = center
    total = math.radians(360)
    l = len(data)
    m = transform(max(data))
    part = total/l
    for i in range(0, l):
        if m > 0:
            p = transform(data[i])
            h = p * 5
            hue = p / m
            c = Color(0, 0, 0, 0)
            c.hsva = ((1-hue) * 360, 100, 100, 0)
            x = x0 + (m*2+h)*math.cos(part * i)
            y = y0 + (m*2+h)*math.sin(part*i)
            line(surface, c, (x0,y0),(x,y),1)
    # NOTE(review): `c` is only bound inside the loop, so this raises
    # NameError when data is empty or m <= 0 -- confirm callers always
    # pass non-trivial data.
    circle(surface,c, center,int(m*2),0)

def graphsGraphs(graphs, direction=0):
    # Stack several graph closures vertically in equal horizontal
    # strips of the given rectangle. (`direction` is accepted but not
    # used.)
    def f(surface, bigRect):
        x0, y0, W, H = bigRect
        h = H / len(graphs)
        for graph in graphs:
            graph(surface, (x0, y0, W, h))
            y0 += h
    return f
/src/phosphene/signal.py
import time
import numpy

from util import indexable

__all__ = [
    'Signal',
    'lift',
    'foldp',
    'perceive'
]

class lift:
    """ Annotate an object as lifted.

        A lifted object is either a function of the signal (evaluated
        lazily, once per frame) or an iterable indexed by the signal's
        current sample position. """
    def __init__(self, f, t_indexable=None):
        # t_indexable applies to iterables only: None/True wraps the
        # data in a temporally-shifted `indexable` anchored at the
        # current sample; False indexes the raw data at signal.x.
        self.f = f
        if hasattr(f, '__call__'):
            self._type = 'lambda'
        elif isinstance(self.f, (list, tuple, numpy.ndarray)):
            self._type = 'iterable'
        else:
            raise ValueError(
                """You can lift only a function that takes the
                signal as argument, or an iterable""")
        self.indexable = t_indexable

    def _manifest(self, signal):
        # compute the current value of this lifted
        # function given the current value of the signal
        if self._type == "lambda":
            return self.f(signal)
        elif self._type == "iterable":
            if self.indexable is None or self.indexable:
                # Make the array temporally indexable
                return indexable(self.f, signal.x)
            else:
                # FIX: this branch used to test `indexable == False`,
                # comparing the imported helper *function* to False
                # (always false), so lift(data, t_indexable=False)
                # silently manifested None.
                return self.f[signal.x]

def foldp(f, init=None):
    """Fold a value over time

    `f(signal, store)` must return (value, new_store); the store is
    threaded between frames starting from `init`. """
    State = lambda: 0 # hack to let me store state
    State.store = init
    State.val = None
    def g(signal):
        val, store = f(signal, State.store)
        State.store = store
        State.val = val
        return val
    return lift(g)

class _WAIT:
    # _WAIT instances are used in the locking
    # mechanism in Signal to avoid recomputation
    # when multiple threads are using a signal
    pass

class Signal:
    """ The Signal abstraction.

        Assigning a `lift` to an attribute registers a derived value;
        reading that attribute evaluates it, cached per frame. """
    def __init__(self, Y, sample_rate, max_fps=90):
        self.Y = Y
        self.x = 0                  # current sample index
        self.fps = 0                # running fps estimate
        self.max_fps = max_fps
        self.sample_rate = sample_rate
        self.lifts = {}             # attribute name -> lift
        self.t = lift(lambda s: s.time())
        self.A = lift(Y[:,0], True) # default amplitude: first channel
        self.cache = {}             # name -> (x, value) or _WAIT

    def time(self, t=time.time):
        # this signal's definition of time
        return t()

    def __getattr__(self, k):
        # Called only when normal lookup fails: resolve lifted values,
        # caching at most one value per frame (keyed on self.x).
        if k in self.lifts:
            # Lifted values must have the same value
            # for the same x. Cache them.
            # This also helps in performance e.g. when
            # fft is needed a multiple places
            if k in self.cache:
                if isinstance(self.cache[k], _WAIT):
                    # Another thread is computing this value; spin
                    # until it lands, then return it.
                    while isinstance(self.cache[k], _WAIT):
                        pass
                    return self.cache[k][1]
                else:
                    x, val = self.cache[k]
                    if x == self.x:
                        return val
            self.cache[k] = _WAIT()
            val = self.lifts[k]._manifest(self)
            self.cache[k] = (self.x, val)
            return val
        else:
            # FIX: previously raised KeyError straight out of
            # self.__dict__[k]; missing attributes must raise
            # AttributeError for Python's attribute protocol.
            try:
                return self.__dict__[k]
            except KeyError:
                raise AttributeError(k)

    def __setattr__(self, k, v):
        # Assigning a lift registers it; everything else is a plain
        # instance attribute.
        if isinstance(v, lift):
            self.lifts[k] = v
        else:
            self.__dict__[k] = v

    def set_state(self, x, fps, frames):
        self.x = x
        self.fps = fps
        self.frames = frames

def perceive(processes, signal, max_fps):
    """Let processes perceive the signal

    simulates real-time reading of signals and runs all
    the functions in processes (these functions take the current
    signal value as argument)
    """
    start_time = signal.time()
    call_spacing = 1.0 / max_fps
    sample_count = len(signal.Y)

    prev_x = -1
    x = 0
    frames = 0
    fps = max_fps
    while True:
        tic = signal.time()
        # what should be the current sample?
        x = int((tic - start_time) * signal.sample_rate)
        if x >= sample_count:
            break
        frames += 1
        # approximate current fps (exponential moving average)
        # FIX: clamp the denominator -- two iterations landing on the
        # same sample would previously divide by zero.
        fps = fps * 0.5 + 0.5 * signal.sample_rate / float(max(x - prev_x, 1))

        # Advance state of the signal
        signal.set_state(x, fps, frames)

        for p in processes:
            p(signal) # show processes the signal

        prev_x = x
        toc = signal.time()
        wait = call_spacing - (toc - tic)

        # chill out before looping again
        # FIXME: this assumes that the frame rate varies smoothly
        #        i.e. next frame takes approximately takes the
        #        same time as few frames immediately before it
        if wait > 0:
            time.sleep(wait)
/src/phosphene/signalutil.py
# Functions to help you lift and fold from .signal import * from dsp import * import numpy import pdb import math def setup(signal, horizon=576): # Note of awesome: this only sets up dependencies, # things absolutely necessary are evaluated. signal.fft = lift(lambda s: \ fft(s.A[-horizon/2:horizon/2], False, True, True)) for i in [1, 3, 4, 5, 6, 8, 12, 16, 32]: setup_bands(signal, i) def setup_bands(signal, bands): def get(s, prefix): return getattr(s, prefix + str(bands)) setattr(signal, 'chan%d' % bands, lift(lambda s: group(bands, s.fft))) setattr(signal, 'avg%d' % bands, blend(lambda s: get(s, 'chan'), lambda s, v, avg: 0.2 if v > avg else 0.5)) setattr(signal, 'longavg%d' % bands, blend(lambda s: get(s, 'chan'), lambda s, v, avg: 0.9 if s.frames < 50 else 0.992)) # Booya. thresh = 1.7 setattr(signal, 'peaks%d' % bands, blend(lambda s: get(s, 'avg') > thresh * get(s, 'longavg'), lambda s, v, a: 0.2)) setattr(signal, 'chan%drel' % bands, lift(lambda s: numpymap( lambda (x, y): x / y if y > 0.001 else 1, zip(get(s, 'chan'), get(s, 'longavg'))))) setattr(signal, 'avg%drel' % bands, lift(lambda s: numpymap( lambda (x, y): x / y if y > 0.001 else 1, zip(get(s, 'avg'), get(s, 'longavg'))))) ## Detecting beats def normalize(data, signal, divisor=None): if divisor is None: divisor = lambda s, n: getattr(s, 'longavg%d' % n) n = len(data) divs = divisor(signal, n) return numpymap(lambda (a, b): a / max(0.01, b), zip(data, divs)) def fallingMax(f, minf=lambda s: 0.5, cutoff=0.95, gravity=lambda s: 0.9): def maxer(signal, prev): # prev contains: thisFrame = f(signal) if prev == None: init = (thisFrame, [signal.t] * len(thisFrame)) return (init, init) maxVal, maxTime = prev mins = minf(signal) try: s = sum(mins) except: s = mins for i in range(0, len(thisFrame)): if thisFrame[i] > cutoff * maxVal[i] and s != 0: # Update maxVal[i] = thisFrame[i] maxTime[i] = signal.t else: # Fall maxVal[i] -= gravity(signal) * (signal.t - maxTime[i]) return ((maxVal, maxTime), (maxVal, 
maxTime)) return foldp(maxer, None) def boopValue(t2, maxes): maxVal, maxTime = maxes return numpy.array([math.exp(-(t2 - t1) * 9) for t1 in maxTime]) def blend(f, rate=lambda s, val, avg: 0.3): def blender(signal, avg): vals = f(signal) l = len(vals) # None is the starting value if avg is None: avg = [0] * l for i in range(0, l): if isinstance(rate, float): r = rate elif hasattr(rate, '__call__'): r = rate(signal, vals[i], avg[i]) else: ValueError("rate of decay must be a float or a lambda") r = adjustRate(r, signal) # adjust based on fps avg[i] = avg[i] * r + vals[i] * (1-r) avg = numpy.array(avg) return (avg, avg) # required by foldp return foldp(blender, None) def adjustRate(r, signal): # THANKS MILKDROP! FOR EVERYTHING! pow = math.pow return pow(pow(r, signal.max_fps), 1.0/signal.fps)
/src/phosphene/util.py
import numpy from threading import Thread # this is for the repl __all__ = ['memoize', 'memoizeBy', 'numpymap', 'indexable', 'reverse'] # Helper functions def memoize(f, key=None): mem = {} def g(*args): k = str(args) if mem.has_key(k): return mem[k] else: r = f(*args) mem[k] = r return r return g def memoizeBy(f, x, *args): # memoize by something else. return memoize(lambda k: f(*args))(x) def numpymap(f, X): " returns a numpy array after maping " return numpy.array(map(f, X)) def indexable(f, offset=0): " make a list-like object " if not hasattr(f, '__call__'): # XXX: Assuming f is a sequence type try: f[0] except: raise "Are you sure what you are trying" + \ "to make indexable is a function or" + \ "a sequence type?" g = f f = lambda i: g[i] # LOL class Indexable: def getFunction(self): return f def __getitem__(self, *i): if len(i) == 1: i = i[0] if isinstance(i, int): return f(i + offset) # Handle range queries elif isinstance(i, slice): return [f(j + offset) for j in \ range(i.start, i.stop, 1 if i.step is None else 0)] else: raise "You will have to implement that crazy indexing." def __len__(self): return 0 return Indexable() def windowedMap(f, samples, width, overlap): return res def reverse(l): m = [c for c in l] m.reverse() return m
/src/setup.py
import os

from setuptools import setup


def read(fname):
    """Return the text of `fname`, resolved relative to this script."""
    here = os.path.dirname(__file__)
    return open(os.path.join(here, fname)).read()


# Package metadata; the long description is pulled straight from the
# repository README one directory up.
setup(
    name = "phosphene",
    version = "0.0.1",
    author = "Shashi Gowda",
    author_email = "shashigowda91@gmail.com",
    description = ("A library for music processing and visualization"),
    license = "MIT",
    keywords = "music audio dsp visualization",
    url = "https://github.com/shashi/phosphene",
    packages=["phosphene"],
    long_description=read("../README.md"),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Multimedia :: Sound/Audio :: Analysis",
        "License :: OSI Approved :: MIT License",
    ],
)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
stvncrn/stockx_api_ref
refs/heads/master
{"/sdk/python/lib/build/lib/io_stockx/models/__init__.py": ["/sdk/python/lib/io_stockx/models/customer_object_merchant.py", "/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_request.py", "/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_product_shipping.py", "/sdk/python/lib/io_stockx/models/address_object.py", "/sdk/python/lib/io_stockx/models/search_hit.py", "/sdk/python/lib/io_stockx/models/portfolio_request_portfolio_item.py", "/sdk/python/lib/build/lib/io_stockx/models/billing_object.py", "/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item.py", "/sdk/python/lib/build/lib/io_stockx/models/search_results.py", "/sdk/python/lib/io_stockx/models/product_info_product_attributes.py", "/sdk/python/lib/build/lib/io_stockx/models/product_info_attributes.py", "/sdk/python/lib/io_stockx/models/market_data_market.py", "/sdk/python/lib/build/lib/io_stockx/models/portfolioitems_id_get_response_portfolio_item_product.py", "/sdk/python/lib/io_stockx/models/customer_object.py", "/sdk/python/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_merchant.py", "/sdk/python/lib/build/lib/io_stockx/models/search_hit_searchable_traits.py"], "/sdk/python/lib/io_stockx/models/customer_object.py": ["/sdk/python/lib/io_stockx/models/customer_object_merchant.py", "/sdk/python/lib/build/lib/io_stockx/models/billing_object.py"], "/sdk/python/lib/build/lib/io_stockx/models/portfolioitems_id_get_response_portfolio_item_product.py": ["/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_product_shipping.py"], "/sdk/python/lib/build/lib/io_stockx/models/billing_object.py": ["/sdk/python/lib/io_stockx/models/address_object.py"], "/sdk/python/lib/test/test_stock_x_api.py": ["/sdk/python/lib/build/lib/io_stockx/api/stock_x_api.py"], "/sdk/python/lib/build/lib/io_stockx/models/search_results.py": ["/sdk/python/lib/io_stockx/models/search_hit.py"], "/sdk/python/src/login.py": 
["/sdk/python/src/example_constants.py"], "/sdk/python/src/place_new_lowest_ask_example.py": ["/sdk/python/src/example_constants.py"], "/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item.py": ["/sdk/python/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_merchant.py"], "/sdk/python/lib/io_stockx/models/search_hit.py": ["/sdk/python/lib/build/lib/io_stockx/models/search_hit_searchable_traits.py"]}
└── └── sdk └── python ├── lib │ ├── build │ │ └── lib │ │ ├── io_stockx │ │ │ ├── api │ │ │ │ └── stock_x_api.py │ │ │ └── models │ │ │ ├── __init__.py │ │ │ ├── billing_object.py │ │ │ ├── portfolio_id_del_request.py │ │ │ ├── portfolio_id_del_response_portfolio_item.py │ │ │ ├── portfolio_id_del_response_portfolio_item_product_shipping.py │ │ │ ├── portfolioitems_id_get_response_portfolio_item_product.py │ │ │ ├── product_info_attributes.py │ │ │ ├── search_hit_searchable_traits.py │ │ │ └── search_results.py │ │ └── test │ │ └── test_portfolio_id_del_response_portfolio_item_product_media.py │ ├── io_stockx │ │ └── models │ │ ├── address_object.py │ │ ├── customer_object.py │ │ ├── customer_object_merchant.py │ │ ├── market_data_market.py │ │ ├── portfolio_id_del_response_portfolio_item_merchant.py │ │ ├── portfolio_request_portfolio_item.py │ │ ├── product_info_product_attributes.py │ │ └── search_hit.py │ └── test │ └── test_stock_x_api.py └── src ├── example_constants.py ├── login.py └── place_new_lowest_ask_example.py
/sdk/python/lib/build/lib/io_stockx/api/stock_x_api.py
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from io_stockx.api_client import ApiClient class StockXApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def delete_portfolio(self, id, portfolio, **kwargs): # noqa: E501 """Deletes a portfolio item from the market with the specified id. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_portfolio(id, portfolio, async=True) >>> result = thread.get() :param async bool :param str id: The id of the portfolio item to delete. (required) :param PortfolioIdDelRequest portfolio: The request information for the portfolio delete operation. (required) :return: PortfolioIdDelResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_portfolio_with_http_info(id, portfolio, **kwargs) # noqa: E501 else: (data) = self.delete_portfolio_with_http_info(id, portfolio, **kwargs) # noqa: E501 return data def delete_portfolio_with_http_info(self, id, portfolio, **kwargs): # noqa: E501 """Deletes a portfolio item from the market with the specified id. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_portfolio_with_http_info(id, portfolio, async=True) >>> result = thread.get() :param async bool :param str id: The id of the portfolio item to delete. (required) :param PortfolioIdDelRequest portfolio: The request information for the portfolio delete operation. (required) :return: PortfolioIdDelResponse If the method is called asynchronously, returns the request thread. """ all_params = ['id', 'portfolio'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_portfolio" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `delete_portfolio`") # noqa: E501 # verify the required parameter 'portfolio' is set if ('portfolio' not in params or params['portfolio'] is None): raise ValueError("Missing the required parameter `portfolio` when calling `delete_portfolio`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'portfolio' in params: body_params = params['portfolio'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/v1/portfolio/{id}', 'DELETE', path_params, query_params, 
header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PortfolioIdDelResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_webhook(self, id, **kwargs): # noqa: E501 """delete_webhook # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_webhook(id, async=True) >>> result = thread.get() :param async bool :param str id: (required) :return: None If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.delete_webhook_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.delete_webhook_with_http_info(id, **kwargs) # noqa: E501 return data def delete_webhook_with_http_info(self, id, **kwargs): # noqa: E501 """delete_webhook # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_webhook_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param str id: (required) :return: None If the method is called asynchronously, returns the request thread. 
""" all_params = ['id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_webhook" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `delete_webhook`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/webhook/v1/webhooks/{id}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_open_orders(self, id, **kwargs): # noqa: E501 """get_open_orders # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_open_orders(id, async=True) >>> result = thread.get() :param async bool :param str id: The customer id to lookup open orders with. (required) :return: CustomersIdSellingCurrent If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_open_orders_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.get_open_orders_with_http_info(id, **kwargs) # noqa: E501 return data def get_open_orders_with_http_info(self, id, **kwargs): # noqa: E501 """get_open_orders # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_open_orders_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param str id: The customer id to lookup open orders with. (required) :return: CustomersIdSellingCurrent If the method is called asynchronously, returns the request thread. """ all_params = ['id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_open_orders" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `get_open_orders`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['JWT', 'api_key'] # noqa: E501 return self.api_client.call_api( '/v1/customers/{id}/selling/current', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CustomersIdSellingCurrent', # noqa: E501 auth_settings=auth_settings, 
async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_portfolio(self, portfolio, **kwargs): # noqa: E501 """Returns a market portfolio identified by request parameters. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_portfolio(portfolio, async=True) >>> result = thread.get() :param async bool :param PortfolioRequest portfolio: Requests parameters for looking up a market portfolio. (required) :return: PortfolioResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_portfolio_with_http_info(portfolio, **kwargs) # noqa: E501 else: (data) = self.get_portfolio_with_http_info(portfolio, **kwargs) # noqa: E501 return data def get_portfolio_with_http_info(self, portfolio, **kwargs): # noqa: E501 """Returns a market portfolio identified by request parameters. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_portfolio_with_http_info(portfolio, async=True) >>> result = thread.get() :param async bool :param PortfolioRequest portfolio: Requests parameters for looking up a market portfolio. (required) :return: PortfolioResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['portfolio'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_portfolio" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'portfolio' is set if ('portfolio' not in params or params['portfolio'] is None): raise ValueError("Missing the required parameter `portfolio` when calling `get_portfolio`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'portfolio' in params: body_params = params['portfolio'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/v1/portfolio', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PortfolioResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_portfolio_item(self, id, **kwargs): # noqa: E501 """get_portfolio_item # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_portfolio_item(id, async=True) >>> result = thread.get() :param async bool :param str id: The id of the portfolio item to lookup. (required) :return: PortfolioitemsIdGetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_portfolio_item_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.get_portfolio_item_with_http_info(id, **kwargs) # noqa: E501 return data def get_portfolio_item_with_http_info(self, id, **kwargs): # noqa: E501 """get_portfolio_item # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_portfolio_item_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param str id: The id of the portfolio item to lookup. (required) :return: PortfolioitemsIdGetResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_portfolio_item" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `get_portfolio_item`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['JWT', 'api_key'] # noqa: E501 return self.api_client.call_api( '/v1/portfolioitems/{id}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PortfolioitemsIdGetResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_product_by_id(self, id, **kwargs): # noqa: E501 """get_product_by_id # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_product_by_id(id, async=True) >>> result = thread.get() :param async bool :param str id: The id of the product to return. (required) :param str include: :return: ProductResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_product_by_id_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.get_product_by_id_with_http_info(id, **kwargs) # noqa: E501 return data def get_product_by_id_with_http_info(self, id, **kwargs): # noqa: E501 """get_product_by_id # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_product_by_id_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param str id: The id of the product to return. (required) :param str include: :return: ProductResponse If the method is called asynchronously, returns the request thread. """ all_params = ['id', 'include'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_product_by_id" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `get_product_by_id`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] if 'include' in params: query_params.append(('include', params['include'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['JWT', 'api_key'] # noqa: E501 return self.api_client.call_api( '/v1/products/{id}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, 
files=local_var_files, response_type='ProductResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_product_market_data(self, product_id, **kwargs): # noqa: E501 """Provides historical market data for a given product. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_product_market_data(product_id, async=True) >>> result = thread.get() :param async bool :param str product_id: The product's product UUID (required) :param str sku: The product's SKU :return: MarketData If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_product_market_data_with_http_info(product_id, **kwargs) # noqa: E501 else: (data) = self.get_product_market_data_with_http_info(product_id, **kwargs) # noqa: E501 return data def get_product_market_data_with_http_info(self, product_id, **kwargs): # noqa: E501 """Provides historical market data for a given product. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_product_market_data_with_http_info(product_id, async=True) >>> result = thread.get() :param async bool :param str product_id: The product's product UUID (required) :param str sku: The product's SKU :return: MarketData If the method is called asynchronously, returns the request thread. 
""" all_params = ['product_id', 'sku'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_product_market_data" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'product_id' is set if ('product_id' not in params or params['product_id'] is None): raise ValueError("Missing the required parameter `product_id` when calling `get_product_market_data`") # noqa: E501 collection_formats = {} path_params = {} if 'product_id' in params: path_params['productId'] = params['product_id'] # noqa: E501 query_params = [] if 'sku' in params: query_params.append(('sku', params['sku'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['JWT', 'api_key'] # noqa: E501 return self.api_client.call_api( '/v1/products/{productId}/market', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='MarketData', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_subscriptions(self, **kwargs): # noqa: E501 """get_subscriptions # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_subscriptions(async=True) >>> result = thread.get() :param async bool :return: SubscriptionsResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_subscriptions_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_subscriptions_with_http_info(**kwargs) # noqa: E501 return data def get_subscriptions_with_http_info(self, **kwargs): # noqa: E501 """get_subscriptions # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_subscriptions_with_http_info(async=True) >>> result = thread.get() :param async bool :return: SubscriptionsResponse If the method is called asynchronously, returns the request thread. """ all_params = [] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_subscriptions" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/webhook/v1/subscriptions', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SubscriptionsResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), 
_preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_webhook(self, id, **kwargs): # noqa: E501 """get_webhook # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_webhook(id, async=True) >>> result = thread.get() :param async bool :param str id: (required) :return: WebhooksIdGetResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_webhook_with_http_info(id, **kwargs) # noqa: E501 else: (data) = self.get_webhook_with_http_info(id, **kwargs) # noqa: E501 return data def get_webhook_with_http_info(self, id, **kwargs): # noqa: E501 """get_webhook # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_webhook_with_http_info(id, async=True) >>> result = thread.get() :param async bool :param str id: (required) :return: WebhooksIdGetResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['id'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_webhook" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'id' is set if ('id' not in params or params['id'] is None): raise ValueError("Missing the required parameter `id` when calling `get_webhook`") # noqa: E501 collection_formats = {} path_params = {} if 'id' in params: path_params['id'] = params['id'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/webhook/v1/webhooks/{id}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WebhooksIdGetResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_webhooks(self, **kwargs): # noqa: E501 """get_webhooks # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_webhooks(async=True) >>> result = thread.get() :param async bool :return: WebhooksGetResponse If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.get_webhooks_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_webhooks_with_http_info(**kwargs) # noqa: E501 return data def get_webhooks_with_http_info(self, **kwargs): # noqa: E501 """get_webhooks # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_webhooks_with_http_info(async=True) >>> result = thread.get() :param async bool :return: WebhooksGetResponse If the method is called asynchronously, returns the request thread. """ all_params = [] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_webhooks" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/webhook/v1/webhooks', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WebhooksGetResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def login(self, login, **kwargs): # noqa: E501 """Attempts to log the user in with a username and password. 
# noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.login(login, async=True) >>> result = thread.get() :param async bool :param LoginRequest login: Object that contains the user's authentication credentials.' (required) :return: LoginResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.login_with_http_info(login, **kwargs) # noqa: E501 else: (data) = self.login_with_http_info(login, **kwargs) # noqa: E501 return data def login_with_http_info(self, login, **kwargs): # noqa: E501 """Attempts to log the user in with a username and password. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.login_with_http_info(login, async=True) >>> result = thread.get() :param async bool :param LoginRequest login: Object that contains the user's authentication credentials.' (required) :return: LoginResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['login'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method login" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'login' is set if ('login' not in params or params['login'] is None): raise ValueError("Missing the required parameter `login` when calling `login`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'login' in params: body_params = params['login'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/v1/login', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='LoginResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def lookup_product(self, **kwargs): # noqa: E501 """lookup_product # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.lookup_product(async=True) >>> result = thread.get() :param async bool :param str identifier: The product identifier to lookup, e.g. 
(air-jordan-1-retro-high-off-white-chicago) :param str size: The size of the product. :return: ProductInfo If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.lookup_product_with_http_info(**kwargs) # noqa: E501 else: (data) = self.lookup_product_with_http_info(**kwargs) # noqa: E501 return data def lookup_product_with_http_info(self, **kwargs): # noqa: E501 """lookup_product # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.lookup_product_with_http_info(async=True) >>> result = thread.get() :param async bool :param str identifier: The product identifier to lookup, e.g. (air-jordan-1-retro-high-off-white-chicago) :param str size: The size of the product. :return: ProductInfo If the method is called asynchronously, returns the request thread. """ all_params = ['identifier', 'size'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method lookup_product" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'identifier' in params: query_params.append(('identifier', params['identifier'])) # noqa: E501 if 'size' in params: query_params.append(('size', params['size'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['JWT', 'api_key'] # noqa: E501 return self.api_client.call_api( '/product/lookup', 'GET', path_params, query_params, header_params, body=body_params, 
post_params=form_params, files=local_var_files, response_type='ProductInfo', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def new_portfolio_ask(self, portfolio, **kwargs): # noqa: E501 """Creates a new seller ask on the market for a given product. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.new_portfolio_ask(portfolio, async=True) >>> result = thread.get() :param async bool :param PortfolioRequest portfolio: The portfolio request representing the ask to place on the market. (required) :return: PortfolioResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.new_portfolio_ask_with_http_info(portfolio, **kwargs) # noqa: E501 else: (data) = self.new_portfolio_ask_with_http_info(portfolio, **kwargs) # noqa: E501 return data def new_portfolio_ask_with_http_info(self, portfolio, **kwargs): # noqa: E501 """Creates a new seller ask on the market for a given product. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.new_portfolio_ask_with_http_info(portfolio, async=True) >>> result = thread.get() :param async bool :param PortfolioRequest portfolio: The portfolio request representing the ask to place on the market. (required) :return: PortfolioResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['portfolio'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method new_portfolio_ask" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'portfolio' is set if ('portfolio' not in params or params['portfolio'] is None): raise ValueError("Missing the required parameter `portfolio` when calling `new_portfolio_ask`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'portfolio' in params: body_params = params['portfolio'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/v1/portfolio/ask', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PortfolioResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def new_portfolio_bid(self, portfolio, **kwargs): # noqa: E501 """Creates a new buyer bid on the market for a given product. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.new_portfolio_bid(portfolio, async=True) >>> result = thread.get() :param async bool :param PortfolioRequest portfolio: The portfolio request representing the bid to place on the market. (required) :return: PortfolioResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.new_portfolio_bid_with_http_info(portfolio, **kwargs) # noqa: E501 else: (data) = self.new_portfolio_bid_with_http_info(portfolio, **kwargs) # noqa: E501 return data def new_portfolio_bid_with_http_info(self, portfolio, **kwargs): # noqa: E501 """Creates a new buyer bid on the market for a given product. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.new_portfolio_bid_with_http_info(portfolio, async=True) >>> result = thread.get() :param async bool :param PortfolioRequest portfolio: The portfolio request representing the bid to place on the market. (required) :return: PortfolioResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['portfolio'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method new_portfolio_bid" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'portfolio' is set if ('portfolio' not in params or params['portfolio'] is None): raise ValueError("Missing the required parameter `portfolio` when calling `new_portfolio_bid`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'portfolio' in params: body_params = params['portfolio'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/v1/portfolio/bid', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='PortfolioResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def post_webhooks(self, portfolio, **kwargs): # noqa: E501 """post_webhooks # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_webhooks(portfolio, async=True) >>> result = thread.get() :param async bool :param WebhooksPostRequest portfolio: (required) :return: WebhooksPostResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.post_webhooks_with_http_info(portfolio, **kwargs) # noqa: E501 else: (data) = self.post_webhooks_with_http_info(portfolio, **kwargs) # noqa: E501 return data def post_webhooks_with_http_info(self, portfolio, **kwargs): # noqa: E501 """post_webhooks # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.post_webhooks_with_http_info(portfolio, async=True) >>> result = thread.get() :param async bool :param WebhooksPostRequest portfolio: (required) :return: WebhooksPostResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['portfolio'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method post_webhooks" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'portfolio' is set if ('portfolio' not in params or params['portfolio'] is None): raise ValueError("Missing the required parameter `portfolio` when calling `post_webhooks`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'portfolio' in params: body_params = params['portfolio'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['api_key'] # noqa: E501 return self.api_client.call_api( '/webhook/v1/webhooks', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='WebhooksPostResponse', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def search(self, query, **kwargs): # noqa: E501 """Searches for products by keyword. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async=True >>> thread = api.search(query, async=True) >>> result = thread.get() :param async bool :param str query: The phrase or keyword to search with. (required) :return: SearchResults If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.search_with_http_info(query, **kwargs) # noqa: E501 else: (data) = self.search_with_http_info(query, **kwargs) # noqa: E501 return data def search_with_http_info(self, query, **kwargs): # noqa: E501 """Searches for products by keyword. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.search_with_http_info(query, async=True) >>> result = thread.get() :param async bool :param str query: The phrase or keyword to search with. (required) :return: SearchResults If the method is called asynchronously, returns the request thread. 
""" all_params = ['query'] # noqa: E501 all_params.append('async') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method search" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'query' is set if ('query' not in params or params['query'] is None): raise ValueError("Missing the required parameter `query` when calling `search`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'query' in params: query_params.append(('query', params['query'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['JWT', 'api_key'] # noqa: E501 return self.api_client.call_api( '/v2/search', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SearchResults', # noqa: E501 auth_settings=auth_settings, async=params.get('async'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
# --- file boundary: /sdk/python/lib/build/lib/io_stockx/models/__init__.py ---
# coding: utf-8 # flake8: noqa """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import # import models into model package from io_stockx.models.address_object import AddressObject from io_stockx.models.billing_object import BillingObject from io_stockx.models.customer_object import CustomerObject from io_stockx.models.customer_object_merchant import CustomerObjectMerchant from io_stockx.models.customer_object_security import CustomerObjectSecurity from io_stockx.models.customer_object_shipping import CustomerObjectShipping from io_stockx.models.customers_id_selling_current import CustomersIdSellingCurrent from io_stockx.models.customers_id_selling_current_pagination import CustomersIdSellingCurrentPagination from io_stockx.models.customers_id_selling_current_paging import CustomersIdSellingCurrentPaging from io_stockx.models.login_request import LoginRequest from io_stockx.models.login_response import LoginResponse from io_stockx.models.market_data import MarketData from io_stockx.models.market_data_market import MarketDataMarket from io_stockx.models.portfolio_id_del_request import PortfolioIdDelRequest from io_stockx.models.portfolio_id_del_response import PortfolioIdDelResponse from io_stockx.models.portfolio_id_del_response_portfolio_item import PortfolioIdDelResponsePortfolioItem from io_stockx.models.portfolio_id_del_response_portfolio_item_merchant import PortfolioIdDelResponsePortfolioItemMerchant from io_stockx.models.portfolio_id_del_response_portfolio_item_product import PortfolioIdDelResponsePortfolioItemProduct from io_stockx.models.portfolio_id_del_response_portfolio_item_product_market import PortfolioIdDelResponsePortfolioItemProductMarket from 
io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia from io_stockx.models.portfolio_id_del_response_portfolio_item_product_meta import PortfolioIdDelResponsePortfolioItemProductMeta from io_stockx.models.portfolio_id_del_response_portfolio_item_product_shipping import PortfolioIdDelResponsePortfolioItemProductShipping from io_stockx.models.portfolio_id_del_response_portfolio_item_tracking import PortfolioIdDelResponsePortfolioItemTracking from io_stockx.models.portfolio_request import PortfolioRequest from io_stockx.models.portfolio_request_portfolio_item import PortfolioRequestPortfolioItem from io_stockx.models.portfolio_response import PortfolioResponse from io_stockx.models.portfolio_response_portfolio_item import PortfolioResponsePortfolioItem from io_stockx.models.portfolio_response_portfolio_item_product import PortfolioResponsePortfolioItemProduct from io_stockx.models.portfolio_response_portfolio_item_product_market import PortfolioResponsePortfolioItemProductMarket from io_stockx.models.portfolio_response_portfolio_item_product_media import PortfolioResponsePortfolioItemProductMedia from io_stockx.models.portfolio_response_portfolio_item_tracking import PortfolioResponsePortfolioItemTracking from io_stockx.models.portfolioitems_id_get_response import PortfolioitemsIdGetResponse from io_stockx.models.portfolioitems_id_get_response_portfolio_item import PortfolioitemsIdGetResponsePortfolioItem from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product import PortfolioitemsIdGetResponsePortfolioItemProduct from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product_market import PortfolioitemsIdGetResponsePortfolioItemProductMarket from io_stockx.models.product_info import ProductInfo from io_stockx.models.product_info_attributes import ProductInfoAttributes from io_stockx.models.product_info_attributes_traits import ProductInfoAttributesTraits from 
io_stockx.models.product_info_data import ProductInfoData from io_stockx.models.product_info_meta import ProductInfoMeta from io_stockx.models.product_info_product import ProductInfoProduct from io_stockx.models.product_info_product_attributes import ProductInfoProductAttributes from io_stockx.models.product_lookup_response import ProductLookupResponse from io_stockx.models.product_response import ProductResponse from io_stockx.models.product_response_product import ProductResponseProduct from io_stockx.models.product_response_product_children import ProductResponseProductChildren from io_stockx.models.product_response_product_children_productid import ProductResponseProductChildrenPRODUCTID from io_stockx.models.product_response_product_children_productid_market import ProductResponseProductChildrenPRODUCTIDMarket from io_stockx.models.product_response_product_media import ProductResponseProductMedia from io_stockx.models.product_response_product_meta import ProductResponseProductMeta from io_stockx.models.search_hit import SearchHit from io_stockx.models.search_hit_media import SearchHitMedia from io_stockx.models.search_hit_searchable_traits import SearchHitSearchableTraits from io_stockx.models.search_results import SearchResults from io_stockx.models.subscriptions_response import SubscriptionsResponse from io_stockx.models.webhooks_get_response import WebhooksGetResponse from io_stockx.models.webhooks_id_get_response import WebhooksIdGetResponse from io_stockx.models.webhooks_post_request import WebhooksPostRequest from io_stockx.models.webhooks_post_response import WebhooksPostResponse
/sdk/python/lib/build/lib/io_stockx/models/billing_object.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six

from io_stockx.models.address_object import AddressObject  # noqa: F401,E501


class BillingObject(object):
    """Billing/payment details for a customer (card info plus billing address).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # swagger_types: attribute name -> declared attribute type.
    # attribute_map: attribute name -> JSON key used on the wire.
    swagger_types = {
        'card_type': 'str',
        'token': 'str',
        'last4': 'str',
        'account_email': 'str',
        'expiration_date': 'str',
        'cardholder_name': 'str',
        'address': 'AddressObject'
    }

    attribute_map = {
        'card_type': 'cardType',
        'token': 'token',
        'last4': 'last4',
        'account_email': 'accountEmail',
        'expiration_date': 'expirationDate',
        'cardholder_name': 'cardholderName',
        'address': 'Address'
    }

    def __init__(self, card_type=None, token=None, last4=None, account_email=None, expiration_date=None, cardholder_name=None, address=None):  # noqa: E501
        """BillingObject - a model defined in Swagger"""  # noqa: E501
        # Backing fields for the public properties below.
        self._card_type = None
        self._token = None
        self._last4 = None
        self._account_email = None
        self._expiration_date = None
        self._cardholder_name = None
        self._address = None
        self.discriminator = None

        # Every attribute is optional: assign only the values the caller
        # actually supplied, leaving the rest as None.
        for field, supplied in (
            ('card_type', card_type),
            ('token', token),
            ('last4', last4),
            ('account_email', account_email),
            ('expiration_date', expiration_date),
            ('cardholder_name', cardholder_name),
            ('address', address),
        ):
            if supplied is not None:
                setattr(self, field, supplied)

    @property
    def card_type(self):
        """The card_type of this BillingObject.

        :rtype: str
        """
        return self._card_type

    @card_type.setter
    def card_type(self, value):
        """Sets the card_type of this BillingObject."""
        self._card_type = value

    @property
    def token(self):
        """The token of this BillingObject.

        :rtype: str
        """
        return self._token

    @token.setter
    def token(self, value):
        """Sets the token of this BillingObject."""
        self._token = value

    @property
    def last4(self):
        """The last4 of this BillingObject.

        :rtype: str
        """
        return self._last4

    @last4.setter
    def last4(self, value):
        """Sets the last4 of this BillingObject."""
        self._last4 = value

    @property
    def account_email(self):
        """The account_email of this BillingObject.

        :rtype: str
        """
        return self._account_email

    @account_email.setter
    def account_email(self, value):
        """Sets the account_email of this BillingObject."""
        self._account_email = value

    @property
    def expiration_date(self):
        """The expiration_date of this BillingObject.

        :rtype: str
        """
        return self._expiration_date

    @expiration_date.setter
    def expiration_date(self, value):
        """Sets the expiration_date of this BillingObject."""
        self._expiration_date = value

    @property
    def cardholder_name(self):
        """The cardholder_name of this BillingObject.

        :rtype: str
        """
        return self._cardholder_name

    @cardholder_name.setter
    def cardholder_name(self, value):
        """Sets the cardholder_name of this BillingObject."""
        self._cardholder_name = value

    @property
    def address(self):
        """The address of this BillingObject.

        :rtype: AddressObject
        """
        return self._address

    @address.setter
    def address(self, value):
        """Sets the address of this BillingObject."""
        self._address = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                out[name] = value.to_dict()
            elif isinstance(value, dict):
                out[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                out[name] = value
        return out

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, BillingObject) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_request.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


class PortfolioIdDelRequest(object):
    """Request body for deleting a portfolio item by chain id.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # swagger_types: attribute name -> declared attribute type.
    # attribute_map: attribute name -> JSON key used on the wire.
    swagger_types = {
        'chain_id': 'str',
        'notes': 'str'
    }

    attribute_map = {
        'chain_id': 'chain_id',
        'notes': 'notes'
    }

    def __init__(self, chain_id=None, notes=None):  # noqa: E501
        """PortfolioIdDelRequest - a model defined in Swagger"""  # noqa: E501
        self._chain_id = None
        self._notes = None
        self.discriminator = None
        # Both attributes are required: assigning through the properties
        # raises ValueError when the supplied value is None.
        self.chain_id = chain_id
        self.notes = notes

    @property
    def chain_id(self):
        """The chain_id of this PortfolioIdDelRequest (required).

        :rtype: str
        """
        return self._chain_id

    @chain_id.setter
    def chain_id(self, value):
        """Sets the chain_id of this PortfolioIdDelRequest; rejects None."""
        if value is None:
            raise ValueError("Invalid value for `chain_id`, must not be `None`")  # noqa: E501
        self._chain_id = value

    @property
    def notes(self):
        """The notes of this PortfolioIdDelRequest (required).

        :rtype: str
        """
        return self._notes

    @notes.setter
    def notes(self, value):
        """Sets the notes of this PortfolioIdDelRequest; rejects None."""
        if value is None:
            raise ValueError("Invalid value for `notes`, must not be `None`")  # noqa: E501
        self._notes = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                out[name] = value.to_dict()
            elif isinstance(value, dict):
                out[name] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                out[name] = value
        return out

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, PortfolioIdDelRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six

from io_stockx.models.portfolio_id_del_response_portfolio_item_merchant import PortfolioIdDelResponsePortfolioItemMerchant  # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product import PortfolioIdDelResponsePortfolioItemProduct  # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_tracking import PortfolioIdDelResponsePortfolioItemTracking  # noqa: F401,E501


class PortfolioIdDelResponsePortfolioItem(object):
    """Portfolio item payload returned by the portfolio-delete endpoint.

    NOTE: This class was auto generated by the swagger code generator
    program.  The original expansion contained 28 byte-identical
    getter/setter pairs (one per attribute, each only differing in the
    attribute name); they are now installed programmatically by
    ``_install_required_properties`` below, which preserves the exact
    public behavior — every attribute is a required property backed by a
    ``_<name>`` field whose setter raises
    ``ValueError("Invalid value for `<name>`, must not be `None`")``
    when given None.
    """

    # swagger_types: attribute name -> declared attribute type.
    # attribute_map: attribute name -> JSON key used on the wire.
    swagger_types = {
        'chain_id': 'str',
        'customer_id': 'int',
        'inventory_id': 'str',
        'product_id': 'str',
        'sku_uuid': 'str',
        'merchant_id': 'int',
        'condition': 'int',
        'action': 'int',
        'action_by': 'int',
        'amount': 'int',
        'expires_at': 'str',
        'expires_at_time': 'int',
        'gain_loss_dollars': 'int',
        'gain_loss_percentage': 'int',
        'market_value': 'str',
        'matched_state': 'int',
        'purchase_date': 'str',
        'purchase_date_time': 'int',
        'state': 'int',
        'text': 'str',
        'notes': 'str',
        'created_at_time': 'int',
        'can_edit': 'bool',
        'can_delete': 'bool',
        'tracking': 'PortfolioIdDelResponsePortfolioItemTracking',
        'meta': 'object',
        'product': 'PortfolioIdDelResponsePortfolioItemProduct',
        'merchant': 'PortfolioIdDelResponsePortfolioItemMerchant'
    }

    attribute_map = {
        'chain_id': 'chainId',
        'customer_id': 'customerId',
        'inventory_id': 'inventoryId',
        'product_id': 'productId',
        'sku_uuid': 'skuUuid',
        'merchant_id': 'merchantId',
        'condition': 'condition',
        'action': 'action',
        'action_by': 'actionBy',
        'amount': 'amount',
        'expires_at': 'expiresAt',
        'expires_at_time': 'expiresAtTime',
        'gain_loss_dollars': 'gainLossDollars',
        'gain_loss_percentage': 'gainLossPercentage',
        'market_value': 'marketValue',
        'matched_state': 'matchedState',
        'purchase_date': 'purchaseDate',
        'purchase_date_time': 'purchaseDateTime',
        'state': 'state',
        'text': 'text',
        'notes': 'notes',
        'created_at_time': 'createdAtTime',
        'can_edit': 'canEdit',
        'can_delete': 'canDelete',
        'tracking': 'Tracking',
        'meta': 'meta',
        'product': 'product',
        'merchant': 'Merchant'
    }

    def __init__(self, chain_id=None, customer_id=None, inventory_id=None, product_id=None, sku_uuid=None, merchant_id=None, condition=None, action=None, action_by=None, amount=None, expires_at=None, expires_at_time=None, gain_loss_dollars=None, gain_loss_percentage=None, market_value=None, matched_state=None, purchase_date=None, purchase_date_time=None, state=None, text=None, notes=None, created_at_time=None, can_edit=None, can_delete=None, tracking=None, meta=None, product=None, merchant=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItem - a model defined in Swagger"""  # noqa: E501
        # Keep the (name, value) pairs in declaration order so validation
        # failures surface in the same order as the generated code did.
        values = (
            ('chain_id', chain_id),
            ('customer_id', customer_id),
            ('inventory_id', inventory_id),
            ('product_id', product_id),
            ('sku_uuid', sku_uuid),
            ('merchant_id', merchant_id),
            ('condition', condition),
            ('action', action),
            ('action_by', action_by),
            ('amount', amount),
            ('expires_at', expires_at),
            ('expires_at_time', expires_at_time),
            ('gain_loss_dollars', gain_loss_dollars),
            ('gain_loss_percentage', gain_loss_percentage),
            ('market_value', market_value),
            ('matched_state', matched_state),
            ('purchase_date', purchase_date),
            ('purchase_date_time', purchase_date_time),
            ('state', state),
            ('text', text),
            ('notes', notes),
            ('created_at_time', created_at_time),
            ('can_edit', can_edit),
            ('can_delete', can_delete),
            ('tracking', tracking),
            ('meta', meta),
            ('product', product),
            ('merchant', merchant),
        )
        # Create every backing field first (the generated code initialised
        # all `_<name>` fields to None before running any setter), so even
        # a partially-constructed instance has a complete __dict__.
        for name, _ in values:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # All attributes are required: assignment goes through the
        # validating properties, which raise ValueError on None.
        for name, value in values:
            setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortfolioIdDelResponsePortfolioItem):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _install_required_properties(cls):
    """Attach one validating property per swagger attribute of *cls*.

    Each property reads/writes the ``_<name>`` backing field and raises
    ``ValueError("Invalid value for `<name>`, must not be `None`")`` when
    set to None — exactly the behavior of the per-attribute setters that
    swagger-codegen used to expand inline.
    """
    def make_property(name):
        private = '_' + name

        def getter(self):
            return getattr(self, private)

        def setter(self, value):
            if value is None:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % name)  # noqa: E501
            setattr(self, private, value)

        # property() takes its docstring from the getter.
        getter.__doc__ = "Gets the %s of this %s.  # noqa: E501" % (
            name, cls.__name__)
        return property(getter, setter)

    for attr_name in cls.swagger_types:
        setattr(cls, attr_name, make_property(attr_name))


_install_required_properties(PortfolioIdDelResponsePortfolioItem)
/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_product_shipping.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


class PortfolioIdDelResponsePortfolioItemProductShipping(object):
    """Shipping details (days-to-ship lead time and delivery-day window) for
    a portfolio item's product.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'total_days_to_ship': 'int',
        'has_additional_days_to_ship': 'bool',
        'delivery_days_lower_bound': 'int',
        'delivery_days_upper_bound': 'int'
    }

    attribute_map = {
        'total_days_to_ship': 'totalDaysToShip',
        'has_additional_days_to_ship': 'hasAdditionalDaysToShip',
        'delivery_days_lower_bound': 'deliveryDaysLowerBound',
        'delivery_days_upper_bound': 'deliveryDaysUpperBound'
    }

    def __init__(self, total_days_to_ship=None, has_additional_days_to_ship=None, delivery_days_lower_bound=None, delivery_days_upper_bound=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItemProductShipping - a model defined in Swagger

        Every field is effectively required: each assignment below goes
        through a property setter that raises ValueError for ``None``, so
        leaving any argument at its default raises.

        :param total_days_to_ship: total shipping lead time in days (int)
        :param has_additional_days_to_ship: whether extra ship days apply (bool)
        :param delivery_days_lower_bound: earliest delivery in days (int)
        :param delivery_days_upper_bound: latest delivery in days (int)
        """
        self._total_days_to_ship = None
        self._has_additional_days_to_ship = None
        self._delivery_days_lower_bound = None
        self._delivery_days_upper_bound = None
        self.discriminator = None

        # Assign through the properties so the not-None validation runs.
        self.total_days_to_ship = total_days_to_ship
        self.has_additional_days_to_ship = has_additional_days_to_ship
        self.delivery_days_lower_bound = delivery_days_lower_bound
        self.delivery_days_upper_bound = delivery_days_upper_bound

    @property
    def total_days_to_ship(self):
        """Gets the total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501

        :return: The total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :rtype: int
        """
        return self._total_days_to_ship

    @total_days_to_ship.setter
    def total_days_to_ship(self, total_days_to_ship):
        """Sets the total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.

        :param total_days_to_ship: The total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :type: int
        :raises ValueError: if ``total_days_to_ship`` is ``None``
        """
        if total_days_to_ship is None:
            raise ValueError("Invalid value for `total_days_to_ship`, must not be `None`")  # noqa: E501

        self._total_days_to_ship = total_days_to_ship

    @property
    def has_additional_days_to_ship(self):
        """Gets the has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501

        :return: The has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :rtype: bool
        """
        return self._has_additional_days_to_ship

    @has_additional_days_to_ship.setter
    def has_additional_days_to_ship(self, has_additional_days_to_ship):
        """Sets the has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.

        :param has_additional_days_to_ship: The has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :type: bool
        :raises ValueError: if ``has_additional_days_to_ship`` is ``None``
        """
        if has_additional_days_to_ship is None:
            raise ValueError("Invalid value for `has_additional_days_to_ship`, must not be `None`")  # noqa: E501

        self._has_additional_days_to_ship = has_additional_days_to_ship

    @property
    def delivery_days_lower_bound(self):
        """Gets the delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501

        :return: The delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :rtype: int
        """
        return self._delivery_days_lower_bound

    @delivery_days_lower_bound.setter
    def delivery_days_lower_bound(self, delivery_days_lower_bound):
        """Sets the delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.

        :param delivery_days_lower_bound: The delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :type: int
        :raises ValueError: if ``delivery_days_lower_bound`` is ``None``
        """
        if delivery_days_lower_bound is None:
            raise ValueError("Invalid value for `delivery_days_lower_bound`, must not be `None`")  # noqa: E501

        self._delivery_days_lower_bound = delivery_days_lower_bound

    @property
    def delivery_days_upper_bound(self):
        """Gets the delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501

        :return: The delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :rtype: int
        """
        return self._delivery_days_upper_bound

    @delivery_days_upper_bound.setter
    def delivery_days_upper_bound(self, delivery_days_upper_bound):
        """Sets the delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.

        :param delivery_days_upper_bound: The delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.  # noqa: E501
        :type: int
        :raises ValueError: if ``delivery_days_upper_bound`` is ``None``
        """
        if delivery_days_upper_bound is None:
            raise ValueError("Invalid value for `delivery_days_upper_bound`, must not be `None`")  # noqa: E501

        self._delivery_days_upper_bound = delivery_days_upper_bound

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything exposing to_dict),
        # including models held inside lists and dict values.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortfolioIdDelResponsePortfolioItemProductShipping):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
/sdk/python/lib/build/lib/io_stockx/models/portfolioitems_id_get_response_portfolio_item_product.py
# NOTE(review): auto-generated swagger model PortfolioitemsIdGetResponsePortfolioItemProduct; view truncated — the class continues past this chunk. Each setter raises ValueError on None. Generated code; do not hand-edit beyond comments.
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia # noqa: F401,E501 from io_stockx.models.portfolio_id_del_response_portfolio_item_product_meta import PortfolioIdDelResponsePortfolioItemProductMeta # noqa: F401,E501 from io_stockx.models.portfolio_id_del_response_portfolio_item_product_shipping import PortfolioIdDelResponsePortfolioItemProductShipping # noqa: F401,E501 from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product_market import PortfolioitemsIdGetResponsePortfolioItemProductMarket # noqa: F401,E501 class PortfolioitemsIdGetResponsePortfolioItemProduct(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'id': 'str', 'uuid': 'str', 'brand': 'str', 'category': 'str', 'charity_condition': 'int', 'colorway': 'str', 'condition': 'str', 'country_of_manufacture': 'str', 'gender': 'str', 'content_group': 'str', 'minimum_bid': 'int', 'media': 'PortfolioIdDelResponsePortfolioItemProductMedia', 'name': 'str', 'primary_category': 'str', 'secondary_category': 'str', 'product_category': 'str', 'release_date': 'str', 'retail_price': 'int', 'shoe': 'str', 'short_description': 'str', 'style_id': 'str', 'ticker_symbol': 'str', 'title': 'str', 'data_type': 'str', 'type': 'int', 'size_title': 'str', 'size_descriptor': 'str', 'size_all_descriptor': 'str', 'url_key': 'str', 'year': 'str', 'shipping_group': 'str', 'a_lim': 'int', 'meta': 'PortfolioIdDelResponsePortfolioItemProductMeta', 'shipping': 'PortfolioIdDelResponsePortfolioItemProductShipping', 'children': 'object', 'parent_id': 'str', 'parent_uuid': 'str', 'size_sort_order': 'int', 'shoe_size': 'str', 'market': 'PortfolioitemsIdGetResponsePortfolioItemProductMarket', 'upc': 'str' } attribute_map = { 'id': 'id', 'uuid': 'uuid', 'brand': 'brand', 'category': 'category', 'charity_condition': 'charityCondition', 'colorway': 'colorway', 'condition': 'condition', 'country_of_manufacture': 'countryOfManufacture', 'gender': 'gender', 'content_group': 'contentGroup', 'minimum_bid': 'minimumBid', 'media': 'media', 'name': 'name', 'primary_category': 'primaryCategory', 'secondary_category': 'secondaryCategory', 'product_category': 'productCategory', 'release_date': 'releaseDate', 'retail_price': 'retailPrice', 'shoe': 'shoe', 'short_description': 'shortDescription', 'style_id': 'styleId', 'ticker_symbol': 'tickerSymbol', 'title': 'title', 'data_type': 'dataType', 'type': 'type', 'size_title': 'sizeTitle', 'size_descriptor': 'sizeDescriptor', 'size_all_descriptor': 'sizeAllDescriptor', 'url_key': 'urlKey', 'year': 'year', 'shipping_group': 'shippingGroup', 'a_lim': 'aLim', 'meta': 'meta', 'shipping': 'shipping', 
'children': 'children', 'parent_id': 'parentId', 'parent_uuid': 'parentUuid', 'size_sort_order': 'sizeSortOrder', 'shoe_size': 'shoeSize', 'market': 'market', 'upc': 'upc' } def __init__(self, id=None, uuid=None, brand=None, category=None, charity_condition=None, colorway=None, condition=None, country_of_manufacture=None, gender=None, content_group=None, minimum_bid=None, media=None, name=None, primary_category=None, secondary_category=None, product_category=None, release_date=None, retail_price=None, shoe=None, short_description=None, style_id=None, ticker_symbol=None, title=None, data_type=None, type=None, size_title=None, size_descriptor=None, size_all_descriptor=None, url_key=None, year=None, shipping_group=None, a_lim=None, meta=None, shipping=None, children=None, parent_id=None, parent_uuid=None, size_sort_order=None, shoe_size=None, market=None, upc=None): # noqa: E501 """PortfolioitemsIdGetResponsePortfolioItemProduct - a model defined in Swagger""" # noqa: E501 self._id = None self._uuid = None self._brand = None self._category = None self._charity_condition = None self._colorway = None self._condition = None self._country_of_manufacture = None self._gender = None self._content_group = None self._minimum_bid = None self._media = None self._name = None self._primary_category = None self._secondary_category = None self._product_category = None self._release_date = None self._retail_price = None self._shoe = None self._short_description = None self._style_id = None self._ticker_symbol = None self._title = None self._data_type = None self._type = None self._size_title = None self._size_descriptor = None self._size_all_descriptor = None self._url_key = None self._year = None self._shipping_group = None self._a_lim = None self._meta = None self._shipping = None self._children = None self._parent_id = None self._parent_uuid = None self._size_sort_order = None self._shoe_size = None self._market = None self._upc = None self.discriminator = None self.id = id 
self.uuid = uuid self.brand = brand self.category = category self.charity_condition = charity_condition self.colorway = colorway self.condition = condition self.country_of_manufacture = country_of_manufacture self.gender = gender self.content_group = content_group self.minimum_bid = minimum_bid self.media = media self.name = name self.primary_category = primary_category self.secondary_category = secondary_category self.product_category = product_category self.release_date = release_date self.retail_price = retail_price self.shoe = shoe self.short_description = short_description self.style_id = style_id self.ticker_symbol = ticker_symbol self.title = title self.data_type = data_type self.type = type self.size_title = size_title self.size_descriptor = size_descriptor self.size_all_descriptor = size_all_descriptor self.url_key = url_key self.year = year self.shipping_group = shipping_group self.a_lim = a_lim self.meta = meta self.shipping = shipping self.children = children self.parent_id = parent_id self.parent_uuid = parent_uuid self.size_sort_order = size_sort_order self.shoe_size = shoe_size self.market = market self.upc = upc @property def id(self): """Gets the id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param id: The id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if id is None: raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501 self._id = id @property def uuid(self): """Gets the uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param uuid: The uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if uuid is None: raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501 self._uuid = uuid @property def brand(self): """Gets the brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._brand @brand.setter def brand(self, brand): """Sets the brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param brand: The brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if brand is None: raise ValueError("Invalid value for `brand`, must not be `None`") # noqa: E501 self._brand = brand @property def category(self): """Gets the category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._category @category.setter def category(self, category): """Sets the category of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param category: The category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if category is None: raise ValueError("Invalid value for `category`, must not be `None`") # noqa: E501 self._category = category @property def charity_condition(self): """Gets the charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: int """ return self._charity_condition @charity_condition.setter def charity_condition(self, charity_condition): """Sets the charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param charity_condition: The charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: int """ if charity_condition is None: raise ValueError("Invalid value for `charity_condition`, must not be `None`") # noqa: E501 self._charity_condition = charity_condition @property def colorway(self): """Gets the colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._colorway @colorway.setter def colorway(self, colorway): """Sets the colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param colorway: The colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if colorway is None: raise ValueError("Invalid value for `colorway`, must not be `None`") # noqa: E501 self._colorway = colorway @property def condition(self): """Gets the condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._condition @condition.setter def condition(self, condition): """Sets the condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param condition: The condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if condition is None: raise ValueError("Invalid value for `condition`, must not be `None`") # noqa: E501 self._condition = condition @property def country_of_manufacture(self): """Gets the country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :return: The country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._country_of_manufacture @country_of_manufacture.setter def country_of_manufacture(self, country_of_manufacture): """Sets the country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param country_of_manufacture: The country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if country_of_manufacture is None: raise ValueError("Invalid value for `country_of_manufacture`, must not be `None`") # noqa: E501 self._country_of_manufacture = country_of_manufacture @property def gender(self): """Gets the gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._gender @gender.setter def gender(self, gender): """Sets the gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param gender: The gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if gender is None: raise ValueError("Invalid value for `gender`, must not be `None`") # noqa: E501 self._gender = gender @property def content_group(self): """Gets the content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._content_group @content_group.setter def content_group(self, content_group): """Sets the content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param content_group: The content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :type: str """ if content_group is None: raise ValueError("Invalid value for `content_group`, must not be `None`") # noqa: E501 self._content_group = content_group @property def minimum_bid(self): """Gets the minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: int """ return self._minimum_bid @minimum_bid.setter def minimum_bid(self, minimum_bid): """Sets the minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param minimum_bid: The minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: int """ if minimum_bid is None: raise ValueError("Invalid value for `minimum_bid`, must not be `None`") # noqa: E501 self._minimum_bid = minimum_bid @property def media(self): """Gets the media of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The media of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: PortfolioIdDelResponsePortfolioItemProductMedia """ return self._media @media.setter def media(self, media): """Sets the media of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param media: The media of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: PortfolioIdDelResponsePortfolioItemProductMedia """ if media is None: raise ValueError("Invalid value for `media`, must not be `None`") # noqa: E501 self._media = media @property def name(self): """Gets the name of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The name of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param name: The name of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :type: str """ if name is None: raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name @property def primary_category(self): """Gets the primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._primary_category @primary_category.setter def primary_category(self, primary_category): """Sets the primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param primary_category: The primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if primary_category is None: raise ValueError("Invalid value for `primary_category`, must not be `None`") # noqa: E501 self._primary_category = primary_category @property def secondary_category(self): """Gets the secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._secondary_category @secondary_category.setter def secondary_category(self, secondary_category): """Sets the secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param secondary_category: The secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if secondary_category is None: raise ValueError("Invalid value for `secondary_category`, must not be `None`") # noqa: E501 self._secondary_category = secondary_category @property def product_category(self): """Gets the product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: str """ return self._product_category @product_category.setter def product_category(self, product_category): """Sets the product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param product_category: The product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if product_category is None: raise ValueError("Invalid value for `product_category`, must not be `None`") # noqa: E501 self._product_category = product_category @property def release_date(self): """Gets the release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._release_date @release_date.setter def release_date(self, release_date): """Sets the release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param release_date: The release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if release_date is None: raise ValueError("Invalid value for `release_date`, must not be `None`") # noqa: E501 self._release_date = release_date @property def retail_price(self): """Gets the retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: int """ return self._retail_price @retail_price.setter def retail_price(self, retail_price): """Sets the retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param retail_price: The retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: int """ if retail_price is None: raise ValueError("Invalid value for `retail_price`, must not be `None`") # noqa: E501 self._retail_price = retail_price @property def shoe(self): """Gets the shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :return: The shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._shoe @shoe.setter def shoe(self, shoe): """Sets the shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param shoe: The shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if shoe is None: raise ValueError("Invalid value for `shoe`, must not be `None`") # noqa: E501 self._shoe = shoe @property def short_description(self): """Gets the short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._short_description @short_description.setter def short_description(self, short_description): """Sets the short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param short_description: The short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if short_description is None: raise ValueError("Invalid value for `short_description`, must not be `None`") # noqa: E501 self._short_description = short_description @property def style_id(self): """Gets the style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._style_id @style_id.setter def style_id(self, style_id): """Sets the style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param style_id: The style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if style_id is None: raise ValueError("Invalid value for `style_id`, must not be `None`") # noqa: E501 self._style_id = style_id @property def ticker_symbol(self): """Gets the ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :return: The ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._ticker_symbol @ticker_symbol.setter def ticker_symbol(self, ticker_symbol): """Sets the ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param ticker_symbol: The ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if ticker_symbol is None: raise ValueError("Invalid value for `ticker_symbol`, must not be `None`") # noqa: E501 self._ticker_symbol = ticker_symbol @property def title(self): """Gets the title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._title @title.setter def title(self, title): """Sets the title of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param title: The title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if title is None: raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501 self._title = title @property def data_type(self): """Gets the data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._data_type @data_type.setter def data_type(self, data_type): """Sets the data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param data_type: The data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if data_type is None: raise ValueError("Invalid value for `data_type`, must not be `None`") # noqa: E501 self._data_type = data_type @property def type(self): """Gets the type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The type of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: int """ return self._type @type.setter def type(self, type): """Sets the type of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param type: The type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: int """ if type is None: raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 self._type = type @property def size_title(self): """Gets the size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._size_title @size_title.setter def size_title(self, size_title): """Sets the size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param size_title: The size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if size_title is None: raise ValueError("Invalid value for `size_title`, must not be `None`") # noqa: E501 self._size_title = size_title @property def size_descriptor(self): """Gets the size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._size_descriptor @size_descriptor.setter def size_descriptor(self, size_descriptor): """Sets the size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param size_descriptor: The size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if size_descriptor is None: raise ValueError("Invalid value for `size_descriptor`, must not be `None`") # noqa: E501 self._size_descriptor = size_descriptor @property def size_all_descriptor(self): """Gets the size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: str """ return self._size_all_descriptor @size_all_descriptor.setter def size_all_descriptor(self, size_all_descriptor): """Sets the size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param size_all_descriptor: The size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if size_all_descriptor is None: raise ValueError("Invalid value for `size_all_descriptor`, must not be `None`") # noqa: E501 self._size_all_descriptor = size_all_descriptor @property def url_key(self): """Gets the url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._url_key @url_key.setter def url_key(self, url_key): """Sets the url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param url_key: The url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if url_key is None: raise ValueError("Invalid value for `url_key`, must not be `None`") # noqa: E501 self._url_key = url_key @property def year(self): """Gets the year of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The year of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._year @year.setter def year(self, year): """Sets the year of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param year: The year of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if year is None: raise ValueError("Invalid value for `year`, must not be `None`") # noqa: E501 self._year = year @property def shipping_group(self): """Gets the shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: str """ return self._shipping_group @shipping_group.setter def shipping_group(self, shipping_group): """Sets the shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param shipping_group: The shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if shipping_group is None: raise ValueError("Invalid value for `shipping_group`, must not be `None`") # noqa: E501 self._shipping_group = shipping_group @property def a_lim(self): """Gets the a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: int """ return self._a_lim @a_lim.setter def a_lim(self, a_lim): """Sets the a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param a_lim: The a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: int """ if a_lim is None: raise ValueError("Invalid value for `a_lim`, must not be `None`") # noqa: E501 self._a_lim = a_lim @property def meta(self): """Gets the meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: PortfolioIdDelResponsePortfolioItemProductMeta """ return self._meta @meta.setter def meta(self, meta): """Sets the meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param meta: The meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: PortfolioIdDelResponsePortfolioItemProductMeta """ if meta is None: raise ValueError("Invalid value for `meta`, must not be `None`") # noqa: E501 self._meta = meta @property def shipping(self): """Gets the shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: PortfolioIdDelResponsePortfolioItemProductShipping """ return self._shipping @shipping.setter def shipping(self, shipping): """Sets the shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param shipping: The shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: PortfolioIdDelResponsePortfolioItemProductShipping """ if shipping is None: raise ValueError("Invalid value for `shipping`, must not be `None`") # noqa: E501 self._shipping = shipping @property def children(self): """Gets the children of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The children of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: object """ return self._children @children.setter def children(self, children): """Sets the children of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param children: The children of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: object """ if children is None: raise ValueError("Invalid value for `children`, must not be `None`") # noqa: E501 self._children = children @property def parent_id(self): """Gets the parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._parent_id @parent_id.setter def parent_id(self, parent_id): """Sets the parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param parent_id: The parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if parent_id is None: raise ValueError("Invalid value for `parent_id`, must not be `None`") # noqa: E501 self._parent_id = parent_id @property def parent_uuid(self): """Gets the parent_uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The parent_uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: str """ return self._parent_uuid @parent_uuid.setter def parent_uuid(self, parent_uuid): """Sets the parent_uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param parent_uuid: The parent_uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if parent_uuid is None: raise ValueError("Invalid value for `parent_uuid`, must not be `None`") # noqa: E501 self._parent_uuid = parent_uuid @property def size_sort_order(self): """Gets the size_sort_order of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The size_sort_order of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: int """ return self._size_sort_order @size_sort_order.setter def size_sort_order(self, size_sort_order): """Sets the size_sort_order of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param size_sort_order: The size_sort_order of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: int """ if size_sort_order is None: raise ValueError("Invalid value for `size_sort_order`, must not be `None`") # noqa: E501 self._size_sort_order = size_sort_order @property def shoe_size(self): """Gets the shoe_size of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The shoe_size of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._shoe_size @shoe_size.setter def shoe_size(self, shoe_size): """Sets the shoe_size of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param shoe_size: The shoe_size of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if shoe_size is None: raise ValueError("Invalid value for `shoe_size`, must not be `None`") # noqa: E501 self._shoe_size = shoe_size @property def market(self): """Gets the market of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The market of this PortfolioitemsIdGetResponsePortfolioItemProduct. 
# noqa: E501 :rtype: PortfolioitemsIdGetResponsePortfolioItemProductMarket """ return self._market @market.setter def market(self, market): """Sets the market of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param market: The market of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: PortfolioitemsIdGetResponsePortfolioItemProductMarket """ if market is None: raise ValueError("Invalid value for `market`, must not be `None`") # noqa: E501 self._market = market @property def upc(self): """Gets the upc of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :return: The upc of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :rtype: str """ return self._upc @upc.setter def upc(self, upc): """Sets the upc of this PortfolioitemsIdGetResponsePortfolioItemProduct. :param upc: The upc of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501 :type: str """ if upc is None: raise ValueError("Invalid value for `upc`, must not be `None`") # noqa: E501 self._upc = upc def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, PortfolioitemsIdGetResponsePortfolioItemProduct): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true 
if both objects are not equal""" return not self == other
/sdk/python/lib/build/lib/io_stockx/models/product_info_attributes.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release.  Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


import pprint
import re  # noqa: F401

import six

from io_stockx.models.product_info_attributes_traits import ProductInfoAttributesTraits  # noqa: F401,E501


class ProductInfoAttributes(object):
    """Swagger-generated model carrying a product's attribute payload.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'product_uuid': 'str',
        'sku': 'str',
        'traits': 'ProductInfoAttributesTraits'
    }

    # attribute name -> JSON key in the wire payload
    attribute_map = {
        'product_uuid': 'product_uuid',
        'sku': 'sku',
        'traits': 'traits'
    }

    def __init__(self, product_uuid=None, sku=None, traits=None):  # noqa: E501
        """Create a ProductInfoAttributes; only non-None arguments are stored."""
        self._product_uuid = None
        self._sku = None
        self._traits = None
        self.discriminator = None

        # Route each supplied argument through its property setter.
        for name, value in (('product_uuid', product_uuid),
                            ('sku', sku),
                            ('traits', traits)):
            if value is not None:
                setattr(self, name, value)

    @property
    def product_uuid(self):
        """str: the product's UUID, as reported by the API."""
        return self._product_uuid

    @product_uuid.setter
    def product_uuid(self, product_uuid):
        self._product_uuid = product_uuid

    @property
    def sku(self):
        """str: the product's SKU."""
        return self._sku

    @sku.setter
    def sku(self, sku):
        self._sku = sku

    @property
    def traits(self):
        """ProductInfoAttributesTraits: nested trait information."""
        return self._traits

    @traits.setter
    def traits(self, traits):
        self._traits = traits

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(item):
            # Nested generated models expose to_dict(); everything else passes through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [_convert(v) for v in value]
            elif isinstance(value, dict):
                out[name] = {k: _convert(v) for k, v in value.items()}
            else:
                out[name] = _convert(value)
        return out

    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is the same model type with equal state."""
        if isinstance(other, ProductInfoAttributes):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
/sdk/python/lib/build/lib/io_stockx/models/search_hit_searchable_traits.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release.  Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


import pprint
import re  # noqa: F401

import six


class SearchHitSearchableTraits(object):
    """Swagger-generated model for the searchable traits of a search hit.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'style': 'str',
        'colorway': 'str',
        'retail_price': 'int',
        'release_date': 'str'
    }

    # attribute name -> JSON key in the wire payload
    attribute_map = {
        'style': 'Style',
        'colorway': 'Colorway',
        'retail_price': 'Retail Price',
        'release_date': 'Release Date'
    }

    def __init__(self, style=None, colorway=None, retail_price=None, release_date=None):  # noqa: E501
        """Create a SearchHitSearchableTraits; only non-None arguments are stored."""
        self._style = None
        self._colorway = None
        self._retail_price = None
        self._release_date = None
        self.discriminator = None

        # Route each supplied argument through its property setter.
        for name, value in (('style', style),
                            ('colorway', colorway),
                            ('retail_price', retail_price),
                            ('release_date', release_date)):
            if value is not None:
                setattr(self, name, value)

    @property
    def style(self):
        """str: the hit's style code ("Style")."""
        return self._style

    @style.setter
    def style(self, style):
        self._style = style

    @property
    def colorway(self):
        """str: the hit's colorway ("Colorway")."""
        return self._colorway

    @colorway.setter
    def colorway(self, colorway):
        self._colorway = colorway

    @property
    def retail_price(self):
        """int: the hit's retail price ("Retail Price")."""
        return self._retail_price

    @retail_price.setter
    def retail_price(self, retail_price):
        self._retail_price = retail_price

    @property
    def release_date(self):
        """str: the hit's release date ("Release Date")."""
        return self._release_date

    @release_date.setter
    def release_date(self, release_date):
        self._release_date = release_date

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(item):
            # Nested generated models expose to_dict(); everything else passes through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [_convert(v) for v in value]
            elif isinstance(value, dict):
                out[name] = {k: _convert(v) for k, v in value.items()}
            else:
                out[name] = _convert(value)
        return out

    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is the same model type with equal state."""
        if isinstance(other, SearchHitSearchableTraits):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
/sdk/python/lib/build/lib/io_stockx/models/search_results.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release.  Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


import pprint
import re  # noqa: F401

import six

from io_stockx.models.search_hit import SearchHit  # noqa: F401,E501


class SearchResults(object):
    """Swagger-generated model wrapping a page of search hits.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'hits': 'list[SearchHit]',
        'nb_hits': 'int'
    }

    # attribute name -> JSON key in the wire payload
    attribute_map = {
        'hits': 'hits',
        'nb_hits': 'nbHits'
    }

    def __init__(self, hits=None, nb_hits=None):  # noqa: E501
        """Create a SearchResults; only non-None arguments are stored."""
        self._hits = None
        self._nb_hits = None
        self.discriminator = None

        # Route each supplied argument through its property setter.
        if hits is not None:
            self.hits = hits
        if nb_hits is not None:
            self.nb_hits = nb_hits

    @property
    def hits(self):
        """list[SearchHit]: the hits returned for the query."""
        return self._hits

    @hits.setter
    def hits(self, hits):
        self._hits = hits

    @property
    def nb_hits(self):
        """int: total number of hits ("nbHits")."""
        return self._nb_hits

    @nb_hits.setter
    def nb_hits(self, nb_hits):
        self._nb_hits = nb_hits

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(item):
            # Nested generated models expose to_dict(); everything else passes through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [_convert(v) for v in value]
            elif isinstance(value, dict):
                out[name] = {k: _convert(v) for k, v in value.items()}
            else:
                out[name] = _convert(value)
        return out

    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is the same model type with equal state."""
        if isinstance(other, SearchResults):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
/sdk/python/lib/build/lib/test/test_portfolio_id_del_response_portfolio_item_product_media.py
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import io_stockx from io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia # noqa: E501 from io_stockx.rest import ApiException class TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase): """PortfolioIdDelResponsePortfolioItemProductMedia unit test stubs""" def setUp(self): pass def tearDown(self): pass def testPortfolioIdDelResponsePortfolioItemProductMedia(self): """Test PortfolioIdDelResponsePortfolioItemProductMedia""" # FIXME: construct object with mandatory attributes with example values # model = io_stockx.models.portfolio_id_del_response_portfolio_item_product_media.PortfolioIdDelResponsePortfolioItemProductMedia() # noqa: E501 pass if __name__ == '__main__': unittest.main()
/sdk/python/lib/io_stockx/models/address_object.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release.  Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


import pprint
import re  # noqa: F401

import six


class AddressObject(object):
    """Swagger-generated model describing a mailing address.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'first_name': 'str',
        'last_name': 'str',
        'telephone': 'str',
        'street_address': 'str',
        'extended_address': 'str',
        'locality': 'str',
        'region': 'str',
        'postal_code': 'str',
        'country_code_alpha2': 'str'
    }

    # attribute name -> JSON key in the wire payload
    attribute_map = {
        'first_name': 'firstName',
        'last_name': 'lastName',
        'telephone': 'telephone',
        'street_address': 'streetAddress',
        'extended_address': 'extendedAddress',
        'locality': 'locality',
        'region': 'region',
        'postal_code': 'postalCode',
        'country_code_alpha2': 'countryCodeAlpha2'
    }

    def __init__(self, first_name=None, last_name=None, telephone=None, street_address=None, extended_address=None, locality=None, region=None, postal_code=None, country_code_alpha2=None):  # noqa: E501
        """Create an AddressObject; only non-None arguments are stored."""
        # Initialise every backing attribute to None first.
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Route each supplied argument through its property setter.
        for name, value in (('first_name', first_name),
                            ('last_name', last_name),
                            ('telephone', telephone),
                            ('street_address', street_address),
                            ('extended_address', extended_address),
                            ('locality', locality),
                            ('region', region),
                            ('postal_code', postal_code),
                            ('country_code_alpha2', country_code_alpha2)):
            if value is not None:
                setattr(self, name, value)

    @property
    def first_name(self):
        """str: recipient first name ("firstName")."""
        return self._first_name

    @first_name.setter
    def first_name(self, first_name):
        self._first_name = first_name

    @property
    def last_name(self):
        """str: recipient last name ("lastName")."""
        return self._last_name

    @last_name.setter
    def last_name(self, last_name):
        self._last_name = last_name

    @property
    def telephone(self):
        """str: contact telephone number."""
        return self._telephone

    @telephone.setter
    def telephone(self, telephone):
        self._telephone = telephone

    @property
    def street_address(self):
        """str: street address line ("streetAddress")."""
        return self._street_address

    @street_address.setter
    def street_address(self, street_address):
        self._street_address = street_address

    @property
    def extended_address(self):
        """str: secondary address line ("extendedAddress")."""
        return self._extended_address

    @extended_address.setter
    def extended_address(self, extended_address):
        self._extended_address = extended_address

    @property
    def locality(self):
        """str: city/locality."""
        return self._locality

    @locality.setter
    def locality(self, locality):
        self._locality = locality

    @property
    def region(self):
        """str: state/region."""
        return self._region

    @region.setter
    def region(self, region):
        self._region = region

    @property
    def postal_code(self):
        """str: postal code ("postalCode")."""
        return self._postal_code

    @postal_code.setter
    def postal_code(self, postal_code):
        self._postal_code = postal_code

    @property
    def country_code_alpha2(self):
        """str: ISO alpha-2 country code ("countryCodeAlpha2")."""
        return self._country_code_alpha2

    @country_code_alpha2.setter
    def country_code_alpha2(self, country_code_alpha2):
        self._country_code_alpha2 = country_code_alpha2

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(item):
            # Nested generated models expose to_dict(); everything else passes through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        out = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                out[name] = [_convert(v) for v in value]
            elif isinstance(value, dict):
                out[name] = {k: _convert(v) for k, v in value.items()}
            else:
                out[name] = _convert(value)
        return out

    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is the same model type with equal state."""
        if isinstance(other, AddressObject):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
/sdk/python/lib/io_stockx/models/customer_object.py
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from io_stockx.models.billing_object import BillingObject # noqa: F401,E501 from io_stockx.models.customer_object_merchant import CustomerObjectMerchant # noqa: F401,E501 from io_stockx.models.customer_object_security import CustomerObjectSecurity # noqa: F401,E501 from io_stockx.models.customer_object_shipping import CustomerObjectShipping # noqa: F401,E501 class CustomerObject(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'id': 'str', 'uuid': 'str', 'first_name': 'str', 'last_name': 'str', 'full_name': 'str', 'email': 'str', 'username': 'str', 'email_verified': 'bool', 'default_size': 'str', 'categories': 'list[str]', 'default_category': 'str', 'vacation_date': 'str', 'is_active': 'bool', 'flagged': 'bool', 'hide_portfolio_banner': 'bool', 'refer_url': 'str', 'created_at': 'str', 'created_at_time': 'float', 'is_trader': 'bool', 'ship_by_date': 'bool', 'is_buying': 'bool', 'is_selling': 'bool', 'billing': 'BillingObject', 'shipping': 'CustomerObjectShipping', 'cc_only': 'BillingObject', 'merchant': 'CustomerObjectMerchant', 'promotion_code': 'str', 'paypal_emails': 'str', 'authorization_method': 'str', 'security_override': 'bool', 'team_member': 'bool', 'password_locked': 'bool', 'address_normalize_override': 'bool', 'early_payout_enabled': 'bool', 'early_payout_eligible': 'bool', 'security': 'CustomerObjectSecurity' } attribute_map = { 'id': 'id', 'uuid': 'uuid', 'first_name': 'firstName', 'last_name': 'lastName', 'full_name': 'fullName', 'email': 'email', 'username': 'username', 'email_verified': 'emailVerified', 'default_size': 'defaultSize', 'categories': 'categories', 'default_category': 'defaultCategory', 'vacation_date': 'vacationDate', 'is_active': 'isActive', 'flagged': 'flagged', 'hide_portfolio_banner': 'hidePortfolioBanner', 'refer_url': 'referUrl', 'created_at': 'createdAt', 'created_at_time': 'createdAtTime', 'is_trader': 'isTrader', 'ship_by_date': 'shipByDate', 'is_buying': 'isBuying', 'is_selling': 'isSelling', 'billing': 'Billing', 'shipping': 'Shipping', 'cc_only': 'CCOnly', 'merchant': 'Merchant', 'promotion_code': 'promotionCode', 'paypal_emails': 'paypalEmails', 'authorization_method': 'authorizationMethod', 'security_override': 'securityOverride', 'team_member': 'teamMember', 'password_locked': 'passwordLocked', 'address_normalize_override': 'addressNormalizeOverride', 'early_payout_enabled': 'earlyPayoutEnabled', 'early_payout_eligible': 
'earlyPayoutEligible', 'security': 'security' } def __init__(self, id=None, uuid=None, first_name=None, last_name=None, full_name=None, email=None, username=None, email_verified=None, default_size=None, categories=None, default_category=None, vacation_date=None, is_active=None, flagged=None, hide_portfolio_banner=None, refer_url=None, created_at=None, created_at_time=None, is_trader=None, ship_by_date=None, is_buying=None, is_selling=None, billing=None, shipping=None, cc_only=None, merchant=None, promotion_code=None, paypal_emails=None, authorization_method=None, security_override=None, team_member=None, password_locked=None, address_normalize_override=None, early_payout_enabled=None, early_payout_eligible=None, security=None): # noqa: E501 """CustomerObject - a model defined in Swagger""" # noqa: E501 self._id = None self._uuid = None self._first_name = None self._last_name = None self._full_name = None self._email = None self._username = None self._email_verified = None self._default_size = None self._categories = None self._default_category = None self._vacation_date = None self._is_active = None self._flagged = None self._hide_portfolio_banner = None self._refer_url = None self._created_at = None self._created_at_time = None self._is_trader = None self._ship_by_date = None self._is_buying = None self._is_selling = None self._billing = None self._shipping = None self._cc_only = None self._merchant = None self._promotion_code = None self._paypal_emails = None self._authorization_method = None self._security_override = None self._team_member = None self._password_locked = None self._address_normalize_override = None self._early_payout_enabled = None self._early_payout_eligible = None self._security = None self.discriminator = None if id is not None: self.id = id if uuid is not None: self.uuid = uuid if first_name is not None: self.first_name = first_name if last_name is not None: self.last_name = last_name if full_name is not None: self.full_name = full_name if 
email is not None: self.email = email if username is not None: self.username = username if email_verified is not None: self.email_verified = email_verified if default_size is not None: self.default_size = default_size if categories is not None: self.categories = categories if default_category is not None: self.default_category = default_category if vacation_date is not None: self.vacation_date = vacation_date if is_active is not None: self.is_active = is_active if flagged is not None: self.flagged = flagged if hide_portfolio_banner is not None: self.hide_portfolio_banner = hide_portfolio_banner if refer_url is not None: self.refer_url = refer_url if created_at is not None: self.created_at = created_at if created_at_time is not None: self.created_at_time = created_at_time if is_trader is not None: self.is_trader = is_trader if ship_by_date is not None: self.ship_by_date = ship_by_date if is_buying is not None: self.is_buying = is_buying if is_selling is not None: self.is_selling = is_selling if billing is not None: self.billing = billing if shipping is not None: self.shipping = shipping if cc_only is not None: self.cc_only = cc_only if merchant is not None: self.merchant = merchant if promotion_code is not None: self.promotion_code = promotion_code if paypal_emails is not None: self.paypal_emails = paypal_emails if authorization_method is not None: self.authorization_method = authorization_method if security_override is not None: self.security_override = security_override if team_member is not None: self.team_member = team_member if password_locked is not None: self.password_locked = password_locked if address_normalize_override is not None: self.address_normalize_override = address_normalize_override if early_payout_enabled is not None: self.early_payout_enabled = early_payout_enabled if early_payout_eligible is not None: self.early_payout_eligible = early_payout_eligible if security is not None: self.security = security @property def id(self): """Gets the id of 
this CustomerObject. # noqa: E501 :return: The id of this CustomerObject. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this CustomerObject. :param id: The id of this CustomerObject. # noqa: E501 :type: str """ self._id = id @property def uuid(self): """Gets the uuid of this CustomerObject. # noqa: E501 :return: The uuid of this CustomerObject. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this CustomerObject. :param uuid: The uuid of this CustomerObject. # noqa: E501 :type: str """ self._uuid = uuid @property def first_name(self): """Gets the first_name of this CustomerObject. # noqa: E501 :return: The first_name of this CustomerObject. # noqa: E501 :rtype: str """ return self._first_name @first_name.setter def first_name(self, first_name): """Sets the first_name of this CustomerObject. :param first_name: The first_name of this CustomerObject. # noqa: E501 :type: str """ self._first_name = first_name @property def last_name(self): """Gets the last_name of this CustomerObject. # noqa: E501 :return: The last_name of this CustomerObject. # noqa: E501 :rtype: str """ return self._last_name @last_name.setter def last_name(self, last_name): """Sets the last_name of this CustomerObject. :param last_name: The last_name of this CustomerObject. # noqa: E501 :type: str """ self._last_name = last_name @property def full_name(self): """Gets the full_name of this CustomerObject. # noqa: E501 :return: The full_name of this CustomerObject. # noqa: E501 :rtype: str """ return self._full_name @full_name.setter def full_name(self, full_name): """Sets the full_name of this CustomerObject. :param full_name: The full_name of this CustomerObject. # noqa: E501 :type: str """ self._full_name = full_name @property def email(self): """Gets the email of this CustomerObject. # noqa: E501 :return: The email of this CustomerObject. 
# noqa: E501 :rtype: str """ return self._email @email.setter def email(self, email): """Sets the email of this CustomerObject. :param email: The email of this CustomerObject. # noqa: E501 :type: str """ self._email = email @property def username(self): """Gets the username of this CustomerObject. # noqa: E501 :return: The username of this CustomerObject. # noqa: E501 :rtype: str """ return self._username @username.setter def username(self, username): """Sets the username of this CustomerObject. :param username: The username of this CustomerObject. # noqa: E501 :type: str """ self._username = username @property def email_verified(self): """Gets the email_verified of this CustomerObject. # noqa: E501 :return: The email_verified of this CustomerObject. # noqa: E501 :rtype: bool """ return self._email_verified @email_verified.setter def email_verified(self, email_verified): """Sets the email_verified of this CustomerObject. :param email_verified: The email_verified of this CustomerObject. # noqa: E501 :type: bool """ self._email_verified = email_verified @property def default_size(self): """Gets the default_size of this CustomerObject. # noqa: E501 :return: The default_size of this CustomerObject. # noqa: E501 :rtype: str """ return self._default_size @default_size.setter def default_size(self, default_size): """Sets the default_size of this CustomerObject. :param default_size: The default_size of this CustomerObject. # noqa: E501 :type: str """ self._default_size = default_size @property def categories(self): """Gets the categories of this CustomerObject. # noqa: E501 :return: The categories of this CustomerObject. # noqa: E501 :rtype: list[str] """ return self._categories @categories.setter def categories(self, categories): """Sets the categories of this CustomerObject. :param categories: The categories of this CustomerObject. 
# noqa: E501 :type: list[str] """ self._categories = categories @property def default_category(self): """Gets the default_category of this CustomerObject. # noqa: E501 :return: The default_category of this CustomerObject. # noqa: E501 :rtype: str """ return self._default_category @default_category.setter def default_category(self, default_category): """Sets the default_category of this CustomerObject. :param default_category: The default_category of this CustomerObject. # noqa: E501 :type: str """ self._default_category = default_category @property def vacation_date(self): """Gets the vacation_date of this CustomerObject. # noqa: E501 :return: The vacation_date of this CustomerObject. # noqa: E501 :rtype: str """ return self._vacation_date @vacation_date.setter def vacation_date(self, vacation_date): """Sets the vacation_date of this CustomerObject. :param vacation_date: The vacation_date of this CustomerObject. # noqa: E501 :type: str """ self._vacation_date = vacation_date @property def is_active(self): """Gets the is_active of this CustomerObject. # noqa: E501 :return: The is_active of this CustomerObject. # noqa: E501 :rtype: bool """ return self._is_active @is_active.setter def is_active(self, is_active): """Sets the is_active of this CustomerObject. :param is_active: The is_active of this CustomerObject. # noqa: E501 :type: bool """ self._is_active = is_active @property def flagged(self): """Gets the flagged of this CustomerObject. # noqa: E501 :return: The flagged of this CustomerObject. # noqa: E501 :rtype: bool """ return self._flagged @flagged.setter def flagged(self, flagged): """Sets the flagged of this CustomerObject. :param flagged: The flagged of this CustomerObject. # noqa: E501 :type: bool """ self._flagged = flagged @property def hide_portfolio_banner(self): """Gets the hide_portfolio_banner of this CustomerObject. # noqa: E501 :return: The hide_portfolio_banner of this CustomerObject. 
# noqa: E501 :rtype: bool """ return self._hide_portfolio_banner @hide_portfolio_banner.setter def hide_portfolio_banner(self, hide_portfolio_banner): """Sets the hide_portfolio_banner of this CustomerObject. :param hide_portfolio_banner: The hide_portfolio_banner of this CustomerObject. # noqa: E501 :type: bool """ self._hide_portfolio_banner = hide_portfolio_banner @property def refer_url(self): """Gets the refer_url of this CustomerObject. # noqa: E501 :return: The refer_url of this CustomerObject. # noqa: E501 :rtype: str """ return self._refer_url @refer_url.setter def refer_url(self, refer_url): """Sets the refer_url of this CustomerObject. :param refer_url: The refer_url of this CustomerObject. # noqa: E501 :type: str """ self._refer_url = refer_url @property def created_at(self): """Gets the created_at of this CustomerObject. # noqa: E501 :return: The created_at of this CustomerObject. # noqa: E501 :rtype: str """ return self._created_at @created_at.setter def created_at(self, created_at): """Sets the created_at of this CustomerObject. :param created_at: The created_at of this CustomerObject. # noqa: E501 :type: str """ self._created_at = created_at @property def created_at_time(self): """Gets the created_at_time of this CustomerObject. # noqa: E501 :return: The created_at_time of this CustomerObject. # noqa: E501 :rtype: float """ return self._created_at_time @created_at_time.setter def created_at_time(self, created_at_time): """Sets the created_at_time of this CustomerObject. :param created_at_time: The created_at_time of this CustomerObject. # noqa: E501 :type: float """ self._created_at_time = created_at_time @property def is_trader(self): """Gets the is_trader of this CustomerObject. # noqa: E501 :return: The is_trader of this CustomerObject. # noqa: E501 :rtype: bool """ return self._is_trader @is_trader.setter def is_trader(self, is_trader): """Sets the is_trader of this CustomerObject. :param is_trader: The is_trader of this CustomerObject. 
# noqa: E501 :type: bool """ self._is_trader = is_trader @property def ship_by_date(self): """Gets the ship_by_date of this CustomerObject. # noqa: E501 :return: The ship_by_date of this CustomerObject. # noqa: E501 :rtype: bool """ return self._ship_by_date @ship_by_date.setter def ship_by_date(self, ship_by_date): """Sets the ship_by_date of this CustomerObject. :param ship_by_date: The ship_by_date of this CustomerObject. # noqa: E501 :type: bool """ self._ship_by_date = ship_by_date @property def is_buying(self): """Gets the is_buying of this CustomerObject. # noqa: E501 :return: The is_buying of this CustomerObject. # noqa: E501 :rtype: bool """ return self._is_buying @is_buying.setter def is_buying(self, is_buying): """Sets the is_buying of this CustomerObject. :param is_buying: The is_buying of this CustomerObject. # noqa: E501 :type: bool """ self._is_buying = is_buying @property def is_selling(self): """Gets the is_selling of this CustomerObject. # noqa: E501 :return: The is_selling of this CustomerObject. # noqa: E501 :rtype: bool """ return self._is_selling @is_selling.setter def is_selling(self, is_selling): """Sets the is_selling of this CustomerObject. :param is_selling: The is_selling of this CustomerObject. # noqa: E501 :type: bool """ self._is_selling = is_selling @property def billing(self): """Gets the billing of this CustomerObject. # noqa: E501 :return: The billing of this CustomerObject. # noqa: E501 :rtype: BillingObject """ return self._billing @billing.setter def billing(self, billing): """Sets the billing of this CustomerObject. :param billing: The billing of this CustomerObject. # noqa: E501 :type: BillingObject """ self._billing = billing @property def shipping(self): """Gets the shipping of this CustomerObject. # noqa: E501 :return: The shipping of this CustomerObject. # noqa: E501 :rtype: CustomerObjectShipping """ return self._shipping @shipping.setter def shipping(self, shipping): """Sets the shipping of this CustomerObject. 
:param shipping: The shipping of this CustomerObject. # noqa: E501 :type: CustomerObjectShipping """ self._shipping = shipping @property def cc_only(self): """Gets the cc_only of this CustomerObject. # noqa: E501 :return: The cc_only of this CustomerObject. # noqa: E501 :rtype: BillingObject """ return self._cc_only @cc_only.setter def cc_only(self, cc_only): """Sets the cc_only of this CustomerObject. :param cc_only: The cc_only of this CustomerObject. # noqa: E501 :type: BillingObject """ self._cc_only = cc_only @property def merchant(self): """Gets the merchant of this CustomerObject. # noqa: E501 :return: The merchant of this CustomerObject. # noqa: E501 :rtype: CustomerObjectMerchant """ return self._merchant @merchant.setter def merchant(self, merchant): """Sets the merchant of this CustomerObject. :param merchant: The merchant of this CustomerObject. # noqa: E501 :type: CustomerObjectMerchant """ self._merchant = merchant @property def promotion_code(self): """Gets the promotion_code of this CustomerObject. # noqa: E501 :return: The promotion_code of this CustomerObject. # noqa: E501 :rtype: str """ return self._promotion_code @promotion_code.setter def promotion_code(self, promotion_code): """Sets the promotion_code of this CustomerObject. :param promotion_code: The promotion_code of this CustomerObject. # noqa: E501 :type: str """ self._promotion_code = promotion_code @property def paypal_emails(self): """Gets the paypal_emails of this CustomerObject. # noqa: E501 :return: The paypal_emails of this CustomerObject. # noqa: E501 :rtype: str """ return self._paypal_emails @paypal_emails.setter def paypal_emails(self, paypal_emails): """Sets the paypal_emails of this CustomerObject. :param paypal_emails: The paypal_emails of this CustomerObject. # noqa: E501 :type: str """ self._paypal_emails = paypal_emails @property def authorization_method(self): """Gets the authorization_method of this CustomerObject. 
# noqa: E501 :return: The authorization_method of this CustomerObject. # noqa: E501 :rtype: str """ return self._authorization_method @authorization_method.setter def authorization_method(self, authorization_method): """Sets the authorization_method of this CustomerObject. :param authorization_method: The authorization_method of this CustomerObject. # noqa: E501 :type: str """ self._authorization_method = authorization_method @property def security_override(self): """Gets the security_override of this CustomerObject. # noqa: E501 :return: The security_override of this CustomerObject. # noqa: E501 :rtype: bool """ return self._security_override @security_override.setter def security_override(self, security_override): """Sets the security_override of this CustomerObject. :param security_override: The security_override of this CustomerObject. # noqa: E501 :type: bool """ self._security_override = security_override @property def team_member(self): """Gets the team_member of this CustomerObject. # noqa: E501 :return: The team_member of this CustomerObject. # noqa: E501 :rtype: bool """ return self._team_member @team_member.setter def team_member(self, team_member): """Sets the team_member of this CustomerObject. :param team_member: The team_member of this CustomerObject. # noqa: E501 :type: bool """ self._team_member = team_member @property def password_locked(self): """Gets the password_locked of this CustomerObject. # noqa: E501 :return: The password_locked of this CustomerObject. # noqa: E501 :rtype: bool """ return self._password_locked @password_locked.setter def password_locked(self, password_locked): """Sets the password_locked of this CustomerObject. :param password_locked: The password_locked of this CustomerObject. # noqa: E501 :type: bool """ self._password_locked = password_locked @property def address_normalize_override(self): """Gets the address_normalize_override of this CustomerObject. 
# noqa: E501 :return: The address_normalize_override of this CustomerObject. # noqa: E501 :rtype: bool """ return self._address_normalize_override @address_normalize_override.setter def address_normalize_override(self, address_normalize_override): """Sets the address_normalize_override of this CustomerObject. :param address_normalize_override: The address_normalize_override of this CustomerObject. # noqa: E501 :type: bool """ self._address_normalize_override = address_normalize_override @property def early_payout_enabled(self): """Gets the early_payout_enabled of this CustomerObject. # noqa: E501 :return: The early_payout_enabled of this CustomerObject. # noqa: E501 :rtype: bool """ return self._early_payout_enabled @early_payout_enabled.setter def early_payout_enabled(self, early_payout_enabled): """Sets the early_payout_enabled of this CustomerObject. :param early_payout_enabled: The early_payout_enabled of this CustomerObject. # noqa: E501 :type: bool """ self._early_payout_enabled = early_payout_enabled @property def early_payout_eligible(self): """Gets the early_payout_eligible of this CustomerObject. # noqa: E501 :return: The early_payout_eligible of this CustomerObject. # noqa: E501 :rtype: bool """ return self._early_payout_eligible @early_payout_eligible.setter def early_payout_eligible(self, early_payout_eligible): """Sets the early_payout_eligible of this CustomerObject. :param early_payout_eligible: The early_payout_eligible of this CustomerObject. # noqa: E501 :type: bool """ self._early_payout_eligible = early_payout_eligible @property def security(self): """Gets the security of this CustomerObject. # noqa: E501 :return: The security of this CustomerObject. # noqa: E501 :rtype: CustomerObjectSecurity """ return self._security @security.setter def security(self, security): """Sets the security of this CustomerObject. :param security: The security of this CustomerObject. 
# noqa: E501 :type: CustomerObjectSecurity """ self._security = security def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CustomerObject): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
/sdk/python/lib/io_stockx/models/customer_object_merchant.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


class CustomerObjectMerchant(object):
    """Merchant-related attributes of a StockX customer.

    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # swagger_types: attribute name -> attribute type.
    # attribute_map: attribute name -> JSON key in the API definition.
    swagger_types = {
        'merchant_id': 'str',
        'paypal_email': 'str',
        'preferred_payout': 'str',
        'account_name': 'str'
    }

    attribute_map = {
        'merchant_id': 'merchantId',
        'paypal_email': 'paypalEmail',
        'preferred_payout': 'preferredPayout',
        'account_name': 'accountName'
    }

    def __init__(self, merchant_id=None, paypal_email=None, preferred_payout=None, account_name=None):  # noqa: E501
        """CustomerObjectMerchant - a model defined in Swagger"""  # noqa: E501
        # Back every declared attribute with a private slot first, then
        # route only the values the caller actually supplied through the
        # property setters (None means "not provided").
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        supplied = {
            'merchant_id': merchant_id,
            'paypal_email': paypal_email,
            'preferred_payout': preferred_payout,
            'account_name': account_name,
        }
        for attr, value in supplied.items():
            if value is not None:
                setattr(self, attr, value)

    @property
    def merchant_id(self):
        """Gets the merchant_id of this CustomerObjectMerchant.

        :return: The merchant_id of this CustomerObjectMerchant.
        :rtype: str
        """
        return self._merchant_id

    @merchant_id.setter
    def merchant_id(self, value):
        """Sets the merchant_id of this CustomerObjectMerchant.

        :param value: The merchant_id of this CustomerObjectMerchant.
        :type: str
        """
        self._merchant_id = value

    @property
    def paypal_email(self):
        """Gets the paypal_email of this CustomerObjectMerchant.

        :return: The paypal_email of this CustomerObjectMerchant.
        :rtype: str
        """
        return self._paypal_email

    @paypal_email.setter
    def paypal_email(self, value):
        """Sets the paypal_email of this CustomerObjectMerchant.

        :param value: The paypal_email of this CustomerObjectMerchant.
        :type: str
        """
        self._paypal_email = value

    @property
    def preferred_payout(self):
        """Gets the preferred_payout of this CustomerObjectMerchant.

        :return: The preferred_payout of this CustomerObjectMerchant.
        :rtype: str
        """
        return self._preferred_payout

    @preferred_payout.setter
    def preferred_payout(self, value):
        """Sets the preferred_payout of this CustomerObjectMerchant.

        :param value: The preferred_payout of this CustomerObjectMerchant.
        :type: str
        """
        self._preferred_payout = value

    @property
    def account_name(self):
        """Gets the account_name of this CustomerObjectMerchant.

        :return: The account_name of this CustomerObjectMerchant.
        :rtype: str
        """
        return self._account_name

    @account_name.setter
    def account_name(self, value):
        """Sets the account_name of this CustomerObjectMerchant.

        :param value: The account_name of this CustomerObjectMerchant.
        :type: str
        """
        self._account_name = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize one level of nested models inside lists.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize one level of nested models inside dict values.
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, CustomerObjectMerchant)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not (self == other)
/sdk/python/lib/io_stockx/models/market_data_market.py
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class MarketDataMarket(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'product_id': 'int', 'sku_uuid': 'str', 'product_uuid': 'str', 'lowest_ask': 'int', 'lowest_ask_size': 'str', 'parent_lowest_ask': 'int', 'number_of_asks': 'int', 'sales_this_period': 'int', 'sales_last_period': 'int', 'highest_bid': 'int', 'highest_bid_size': 'str', 'number_of_bids': 'int', 'annual_high': 'int', 'annual_low': 'int', 'deadstock_range_low': 'int', 'deadstock_range_high': 'int', 'volatility': 'float', 'deadstock_sold': 'int', 'price_premium': 'float', 'average_deadstock_price': 'int', 'last_sale': 'int', 'last_sale_size': 'str', 'sales_last72_hours': 'int', 'change_value': 'int', 'change_percentage': 'float', 'abs_change_percentage': 'float', 'total_dollars': 'int', 'updated_at': 'int', 'last_lowest_ask_time': 'int', 'last_highest_bid_time': 'int', 'last_sale_date': 'str', 'created_at': 'str', 'deadstock_sold_rank': 'int', 'price_premium_rank': 'int', 'average_deadstock_price_rank': 'int', 'featured': 'str' } attribute_map = { 'product_id': 'productId', 'sku_uuid': 'skuUuid', 'product_uuid': 'productUuid', 'lowest_ask': 'lowestAsk', 'lowest_ask_size': 'lowestAskSize', 'parent_lowest_ask': 'parentLowestAsk', 'number_of_asks': 'numberOfAsks', 'sales_this_period': 'salesThisPeriod', 'sales_last_period': 'salesLastPeriod', 'highest_bid': 'highestBid', 
'highest_bid_size': 'highestBidSize', 'number_of_bids': 'numberOfBids', 'annual_high': 'annualHigh', 'annual_low': 'annualLow', 'deadstock_range_low': 'deadstockRangeLow', 'deadstock_range_high': 'deadstockRangeHigh', 'volatility': 'volatility', 'deadstock_sold': 'deadstockSold', 'price_premium': 'pricePremium', 'average_deadstock_price': 'averageDeadstockPrice', 'last_sale': 'lastSale', 'last_sale_size': 'lastSaleSize', 'sales_last72_hours': 'salesLast72Hours', 'change_value': 'changeValue', 'change_percentage': 'changePercentage', 'abs_change_percentage': 'absChangePercentage', 'total_dollars': 'totalDollars', 'updated_at': 'updatedAt', 'last_lowest_ask_time': 'lastLowestAskTime', 'last_highest_bid_time': 'lastHighestBidTime', 'last_sale_date': 'lastSaleDate', 'created_at': 'createdAt', 'deadstock_sold_rank': 'deadstockSoldRank', 'price_premium_rank': 'pricePremiumRank', 'average_deadstock_price_rank': 'averageDeadstockPriceRank', 'featured': 'featured' } def __init__(self, product_id=None, sku_uuid=None, product_uuid=None, lowest_ask=None, lowest_ask_size=None, parent_lowest_ask=None, number_of_asks=None, sales_this_period=None, sales_last_period=None, highest_bid=None, highest_bid_size=None, number_of_bids=None, annual_high=None, annual_low=None, deadstock_range_low=None, deadstock_range_high=None, volatility=None, deadstock_sold=None, price_premium=None, average_deadstock_price=None, last_sale=None, last_sale_size=None, sales_last72_hours=None, change_value=None, change_percentage=None, abs_change_percentage=None, total_dollars=None, updated_at=None, last_lowest_ask_time=None, last_highest_bid_time=None, last_sale_date=None, created_at=None, deadstock_sold_rank=None, price_premium_rank=None, average_deadstock_price_rank=None, featured=None): # noqa: E501 """MarketDataMarket - a model defined in Swagger""" # noqa: E501 self._product_id = None self._sku_uuid = None self._product_uuid = None self._lowest_ask = None self._lowest_ask_size = None 
self._parent_lowest_ask = None self._number_of_asks = None self._sales_this_period = None self._sales_last_period = None self._highest_bid = None self._highest_bid_size = None self._number_of_bids = None self._annual_high = None self._annual_low = None self._deadstock_range_low = None self._deadstock_range_high = None self._volatility = None self._deadstock_sold = None self._price_premium = None self._average_deadstock_price = None self._last_sale = None self._last_sale_size = None self._sales_last72_hours = None self._change_value = None self._change_percentage = None self._abs_change_percentage = None self._total_dollars = None self._updated_at = None self._last_lowest_ask_time = None self._last_highest_bid_time = None self._last_sale_date = None self._created_at = None self._deadstock_sold_rank = None self._price_premium_rank = None self._average_deadstock_price_rank = None self._featured = None self.discriminator = None if product_id is not None: self.product_id = product_id if sku_uuid is not None: self.sku_uuid = sku_uuid if product_uuid is not None: self.product_uuid = product_uuid if lowest_ask is not None: self.lowest_ask = lowest_ask if lowest_ask_size is not None: self.lowest_ask_size = lowest_ask_size if parent_lowest_ask is not None: self.parent_lowest_ask = parent_lowest_ask if number_of_asks is not None: self.number_of_asks = number_of_asks if sales_this_period is not None: self.sales_this_period = sales_this_period if sales_last_period is not None: self.sales_last_period = sales_last_period if highest_bid is not None: self.highest_bid = highest_bid if highest_bid_size is not None: self.highest_bid_size = highest_bid_size if number_of_bids is not None: self.number_of_bids = number_of_bids if annual_high is not None: self.annual_high = annual_high if annual_low is not None: self.annual_low = annual_low if deadstock_range_low is not None: self.deadstock_range_low = deadstock_range_low if deadstock_range_high is not None: self.deadstock_range_high = 
deadstock_range_high if volatility is not None: self.volatility = volatility if deadstock_sold is not None: self.deadstock_sold = deadstock_sold if price_premium is not None: self.price_premium = price_premium if average_deadstock_price is not None: self.average_deadstock_price = average_deadstock_price if last_sale is not None: self.last_sale = last_sale if last_sale_size is not None: self.last_sale_size = last_sale_size if sales_last72_hours is not None: self.sales_last72_hours = sales_last72_hours if change_value is not None: self.change_value = change_value if change_percentage is not None: self.change_percentage = change_percentage if abs_change_percentage is not None: self.abs_change_percentage = abs_change_percentage if total_dollars is not None: self.total_dollars = total_dollars if updated_at is not None: self.updated_at = updated_at if last_lowest_ask_time is not None: self.last_lowest_ask_time = last_lowest_ask_time if last_highest_bid_time is not None: self.last_highest_bid_time = last_highest_bid_time if last_sale_date is not None: self.last_sale_date = last_sale_date if created_at is not None: self.created_at = created_at if deadstock_sold_rank is not None: self.deadstock_sold_rank = deadstock_sold_rank if price_premium_rank is not None: self.price_premium_rank = price_premium_rank if average_deadstock_price_rank is not None: self.average_deadstock_price_rank = average_deadstock_price_rank if featured is not None: self.featured = featured @property def product_id(self): """Gets the product_id of this MarketDataMarket. # noqa: E501 :return: The product_id of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._product_id @product_id.setter def product_id(self, product_id): """Sets the product_id of this MarketDataMarket. :param product_id: The product_id of this MarketDataMarket. # noqa: E501 :type: int """ self._product_id = product_id @property def sku_uuid(self): """Gets the sku_uuid of this MarketDataMarket. 
# noqa: E501 :return: The sku_uuid of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._sku_uuid @sku_uuid.setter def sku_uuid(self, sku_uuid): """Sets the sku_uuid of this MarketDataMarket. :param sku_uuid: The sku_uuid of this MarketDataMarket. # noqa: E501 :type: str """ self._sku_uuid = sku_uuid @property def product_uuid(self): """Gets the product_uuid of this MarketDataMarket. # noqa: E501 :return: The product_uuid of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._product_uuid @product_uuid.setter def product_uuid(self, product_uuid): """Sets the product_uuid of this MarketDataMarket. :param product_uuid: The product_uuid of this MarketDataMarket. # noqa: E501 :type: str """ self._product_uuid = product_uuid @property def lowest_ask(self): """Gets the lowest_ask of this MarketDataMarket. # noqa: E501 :return: The lowest_ask of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._lowest_ask @lowest_ask.setter def lowest_ask(self, lowest_ask): """Sets the lowest_ask of this MarketDataMarket. :param lowest_ask: The lowest_ask of this MarketDataMarket. # noqa: E501 :type: int """ self._lowest_ask = lowest_ask @property def lowest_ask_size(self): """Gets the lowest_ask_size of this MarketDataMarket. # noqa: E501 :return: The lowest_ask_size of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._lowest_ask_size @lowest_ask_size.setter def lowest_ask_size(self, lowest_ask_size): """Sets the lowest_ask_size of this MarketDataMarket. :param lowest_ask_size: The lowest_ask_size of this MarketDataMarket. # noqa: E501 :type: str """ self._lowest_ask_size = lowest_ask_size @property def parent_lowest_ask(self): """Gets the parent_lowest_ask of this MarketDataMarket. # noqa: E501 :return: The parent_lowest_ask of this MarketDataMarket. 
# noqa: E501 :rtype: int """ return self._parent_lowest_ask @parent_lowest_ask.setter def parent_lowest_ask(self, parent_lowest_ask): """Sets the parent_lowest_ask of this MarketDataMarket. :param parent_lowest_ask: The parent_lowest_ask of this MarketDataMarket. # noqa: E501 :type: int """ self._parent_lowest_ask = parent_lowest_ask @property def number_of_asks(self): """Gets the number_of_asks of this MarketDataMarket. # noqa: E501 :return: The number_of_asks of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._number_of_asks @number_of_asks.setter def number_of_asks(self, number_of_asks): """Sets the number_of_asks of this MarketDataMarket. :param number_of_asks: The number_of_asks of this MarketDataMarket. # noqa: E501 :type: int """ self._number_of_asks = number_of_asks @property def sales_this_period(self): """Gets the sales_this_period of this MarketDataMarket. # noqa: E501 :return: The sales_this_period of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._sales_this_period @sales_this_period.setter def sales_this_period(self, sales_this_period): """Sets the sales_this_period of this MarketDataMarket. :param sales_this_period: The sales_this_period of this MarketDataMarket. # noqa: E501 :type: int """ self._sales_this_period = sales_this_period @property def sales_last_period(self): """Gets the sales_last_period of this MarketDataMarket. # noqa: E501 :return: The sales_last_period of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._sales_last_period @sales_last_period.setter def sales_last_period(self, sales_last_period): """Sets the sales_last_period of this MarketDataMarket. :param sales_last_period: The sales_last_period of this MarketDataMarket. # noqa: E501 :type: int """ self._sales_last_period = sales_last_period @property def highest_bid(self): """Gets the highest_bid of this MarketDataMarket. # noqa: E501 :return: The highest_bid of this MarketDataMarket. 
# noqa: E501 :rtype: int """ return self._highest_bid @highest_bid.setter def highest_bid(self, highest_bid): """Sets the highest_bid of this MarketDataMarket. :param highest_bid: The highest_bid of this MarketDataMarket. # noqa: E501 :type: int """ self._highest_bid = highest_bid @property def highest_bid_size(self): """Gets the highest_bid_size of this MarketDataMarket. # noqa: E501 :return: The highest_bid_size of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._highest_bid_size @highest_bid_size.setter def highest_bid_size(self, highest_bid_size): """Sets the highest_bid_size of this MarketDataMarket. :param highest_bid_size: The highest_bid_size of this MarketDataMarket. # noqa: E501 :type: str """ self._highest_bid_size = highest_bid_size @property def number_of_bids(self): """Gets the number_of_bids of this MarketDataMarket. # noqa: E501 :return: The number_of_bids of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._number_of_bids @number_of_bids.setter def number_of_bids(self, number_of_bids): """Sets the number_of_bids of this MarketDataMarket. :param number_of_bids: The number_of_bids of this MarketDataMarket. # noqa: E501 :type: int """ self._number_of_bids = number_of_bids @property def annual_high(self): """Gets the annual_high of this MarketDataMarket. # noqa: E501 :return: The annual_high of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._annual_high @annual_high.setter def annual_high(self, annual_high): """Sets the annual_high of this MarketDataMarket. :param annual_high: The annual_high of this MarketDataMarket. # noqa: E501 :type: int """ self._annual_high = annual_high @property def annual_low(self): """Gets the annual_low of this MarketDataMarket. # noqa: E501 :return: The annual_low of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._annual_low @annual_low.setter def annual_low(self, annual_low): """Sets the annual_low of this MarketDataMarket. 
:param annual_low: The annual_low of this MarketDataMarket. # noqa: E501 :type: int """ self._annual_low = annual_low @property def deadstock_range_low(self): """Gets the deadstock_range_low of this MarketDataMarket. # noqa: E501 :return: The deadstock_range_low of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._deadstock_range_low @deadstock_range_low.setter def deadstock_range_low(self, deadstock_range_low): """Sets the deadstock_range_low of this MarketDataMarket. :param deadstock_range_low: The deadstock_range_low of this MarketDataMarket. # noqa: E501 :type: int """ self._deadstock_range_low = deadstock_range_low @property def deadstock_range_high(self): """Gets the deadstock_range_high of this MarketDataMarket. # noqa: E501 :return: The deadstock_range_high of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._deadstock_range_high @deadstock_range_high.setter def deadstock_range_high(self, deadstock_range_high): """Sets the deadstock_range_high of this MarketDataMarket. :param deadstock_range_high: The deadstock_range_high of this MarketDataMarket. # noqa: E501 :type: int """ self._deadstock_range_high = deadstock_range_high @property def volatility(self): """Gets the volatility of this MarketDataMarket. # noqa: E501 :return: The volatility of this MarketDataMarket. # noqa: E501 :rtype: float """ return self._volatility @volatility.setter def volatility(self, volatility): """Sets the volatility of this MarketDataMarket. :param volatility: The volatility of this MarketDataMarket. # noqa: E501 :type: float """ self._volatility = volatility @property def deadstock_sold(self): """Gets the deadstock_sold of this MarketDataMarket. # noqa: E501 :return: The deadstock_sold of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._deadstock_sold @deadstock_sold.setter def deadstock_sold(self, deadstock_sold): """Sets the deadstock_sold of this MarketDataMarket. :param deadstock_sold: The deadstock_sold of this MarketDataMarket. 
# noqa: E501 :type: int """ self._deadstock_sold = deadstock_sold @property def price_premium(self): """Gets the price_premium of this MarketDataMarket. # noqa: E501 :return: The price_premium of this MarketDataMarket. # noqa: E501 :rtype: float """ return self._price_premium @price_premium.setter def price_premium(self, price_premium): """Sets the price_premium of this MarketDataMarket. :param price_premium: The price_premium of this MarketDataMarket. # noqa: E501 :type: float """ self._price_premium = price_premium @property def average_deadstock_price(self): """Gets the average_deadstock_price of this MarketDataMarket. # noqa: E501 :return: The average_deadstock_price of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._average_deadstock_price @average_deadstock_price.setter def average_deadstock_price(self, average_deadstock_price): """Sets the average_deadstock_price of this MarketDataMarket. :param average_deadstock_price: The average_deadstock_price of this MarketDataMarket. # noqa: E501 :type: int """ self._average_deadstock_price = average_deadstock_price @property def last_sale(self): """Gets the last_sale of this MarketDataMarket. # noqa: E501 :return: The last_sale of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._last_sale @last_sale.setter def last_sale(self, last_sale): """Sets the last_sale of this MarketDataMarket. :param last_sale: The last_sale of this MarketDataMarket. # noqa: E501 :type: int """ self._last_sale = last_sale @property def last_sale_size(self): """Gets the last_sale_size of this MarketDataMarket. # noqa: E501 :return: The last_sale_size of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._last_sale_size @last_sale_size.setter def last_sale_size(self, last_sale_size): """Sets the last_sale_size of this MarketDataMarket. :param last_sale_size: The last_sale_size of this MarketDataMarket. 
# noqa: E501 :type: str """ self._last_sale_size = last_sale_size @property def sales_last72_hours(self): """Gets the sales_last72_hours of this MarketDataMarket. # noqa: E501 :return: The sales_last72_hours of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._sales_last72_hours @sales_last72_hours.setter def sales_last72_hours(self, sales_last72_hours): """Sets the sales_last72_hours of this MarketDataMarket. :param sales_last72_hours: The sales_last72_hours of this MarketDataMarket. # noqa: E501 :type: int """ self._sales_last72_hours = sales_last72_hours @property def change_value(self): """Gets the change_value of this MarketDataMarket. # noqa: E501 :return: The change_value of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._change_value @change_value.setter def change_value(self, change_value): """Sets the change_value of this MarketDataMarket. :param change_value: The change_value of this MarketDataMarket. # noqa: E501 :type: int """ self._change_value = change_value @property def change_percentage(self): """Gets the change_percentage of this MarketDataMarket. # noqa: E501 :return: The change_percentage of this MarketDataMarket. # noqa: E501 :rtype: float """ return self._change_percentage @change_percentage.setter def change_percentage(self, change_percentage): """Sets the change_percentage of this MarketDataMarket. :param change_percentage: The change_percentage of this MarketDataMarket. # noqa: E501 :type: float """ self._change_percentage = change_percentage @property def abs_change_percentage(self): """Gets the abs_change_percentage of this MarketDataMarket. # noqa: E501 :return: The abs_change_percentage of this MarketDataMarket. # noqa: E501 :rtype: float """ return self._abs_change_percentage @abs_change_percentage.setter def abs_change_percentage(self, abs_change_percentage): """Sets the abs_change_percentage of this MarketDataMarket. :param abs_change_percentage: The abs_change_percentage of this MarketDataMarket. 
# noqa: E501 :type: float """ self._abs_change_percentage = abs_change_percentage @property def total_dollars(self): """Gets the total_dollars of this MarketDataMarket. # noqa: E501 :return: The total_dollars of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._total_dollars @total_dollars.setter def total_dollars(self, total_dollars): """Sets the total_dollars of this MarketDataMarket. :param total_dollars: The total_dollars of this MarketDataMarket. # noqa: E501 :type: int """ self._total_dollars = total_dollars @property def updated_at(self): """Gets the updated_at of this MarketDataMarket. # noqa: E501 :return: The updated_at of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._updated_at @updated_at.setter def updated_at(self, updated_at): """Sets the updated_at of this MarketDataMarket. :param updated_at: The updated_at of this MarketDataMarket. # noqa: E501 :type: int """ self._updated_at = updated_at @property def last_lowest_ask_time(self): """Gets the last_lowest_ask_time of this MarketDataMarket. # noqa: E501 :return: The last_lowest_ask_time of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._last_lowest_ask_time @last_lowest_ask_time.setter def last_lowest_ask_time(self, last_lowest_ask_time): """Sets the last_lowest_ask_time of this MarketDataMarket. :param last_lowest_ask_time: The last_lowest_ask_time of this MarketDataMarket. # noqa: E501 :type: int """ self._last_lowest_ask_time = last_lowest_ask_time @property def last_highest_bid_time(self): """Gets the last_highest_bid_time of this MarketDataMarket. # noqa: E501 :return: The last_highest_bid_time of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._last_highest_bid_time @last_highest_bid_time.setter def last_highest_bid_time(self, last_highest_bid_time): """Sets the last_highest_bid_time of this MarketDataMarket. :param last_highest_bid_time: The last_highest_bid_time of this MarketDataMarket. 
# noqa: E501 :type: int """ self._last_highest_bid_time = last_highest_bid_time @property def last_sale_date(self): """Gets the last_sale_date of this MarketDataMarket. # noqa: E501 :return: The last_sale_date of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._last_sale_date @last_sale_date.setter def last_sale_date(self, last_sale_date): """Sets the last_sale_date of this MarketDataMarket. :param last_sale_date: The last_sale_date of this MarketDataMarket. # noqa: E501 :type: str """ self._last_sale_date = last_sale_date @property def created_at(self): """Gets the created_at of this MarketDataMarket. # noqa: E501 :return: The created_at of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._created_at @created_at.setter def created_at(self, created_at): """Sets the created_at of this MarketDataMarket. :param created_at: The created_at of this MarketDataMarket. # noqa: E501 :type: str """ self._created_at = created_at @property def deadstock_sold_rank(self): """Gets the deadstock_sold_rank of this MarketDataMarket. # noqa: E501 :return: The deadstock_sold_rank of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._deadstock_sold_rank @deadstock_sold_rank.setter def deadstock_sold_rank(self, deadstock_sold_rank): """Sets the deadstock_sold_rank of this MarketDataMarket. :param deadstock_sold_rank: The deadstock_sold_rank of this MarketDataMarket. # noqa: E501 :type: int """ self._deadstock_sold_rank = deadstock_sold_rank @property def price_premium_rank(self): """Gets the price_premium_rank of this MarketDataMarket. # noqa: E501 :return: The price_premium_rank of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._price_premium_rank @price_premium_rank.setter def price_premium_rank(self, price_premium_rank): """Sets the price_premium_rank of this MarketDataMarket. :param price_premium_rank: The price_premium_rank of this MarketDataMarket. 
# noqa: E501 :type: int """ self._price_premium_rank = price_premium_rank @property def average_deadstock_price_rank(self): """Gets the average_deadstock_price_rank of this MarketDataMarket. # noqa: E501 :return: The average_deadstock_price_rank of this MarketDataMarket. # noqa: E501 :rtype: int """ return self._average_deadstock_price_rank @average_deadstock_price_rank.setter def average_deadstock_price_rank(self, average_deadstock_price_rank): """Sets the average_deadstock_price_rank of this MarketDataMarket. :param average_deadstock_price_rank: The average_deadstock_price_rank of this MarketDataMarket. # noqa: E501 :type: int """ self._average_deadstock_price_rank = average_deadstock_price_rank @property def featured(self): """Gets the featured of this MarketDataMarket. # noqa: E501 :return: The featured of this MarketDataMarket. # noqa: E501 :rtype: str """ return self._featured @featured.setter def featured(self, featured): """Sets the featured of this MarketDataMarket. :param featured: The featured of this MarketDataMarket. 
# noqa: E501 :type: str """ self._featured = featured def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, MarketDataMarket): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
/sdk/python/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_merchant.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


def _required_property(attr):
    """Build a property for *attr* whose setter rejects ``None``.

    The value is stored on ``self._<attr>``; assigning ``None`` raises
    ``ValueError`` with the same message the generated setters used.
    """
    storage = '_' + attr

    def _get(self):
        return getattr(self, storage)

    def _set(self, value):
        if value is None:
            raise ValueError(
                "Invalid value for `%s`, must not be `None`" % attr)  # noqa: E501
        setattr(self, storage, value)

    return property(_get, _set)


class PortfolioIdDelResponsePortfolioItemMerchant(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'int',
        'customer_id': 'int',
        'is_robot': 'int',
        'name': 'str',
        'paypal_email': 'str',
        'take': 'float',
        'created_at': 'str',
        'created_at_time': 'int',
        'updated_at': 'str',
        'updated_at_time': 'int'
    }

    attribute_map = {
        'id': 'id',
        'customer_id': 'customerId',
        'is_robot': 'isRobot',
        'name': 'name',
        'paypal_email': 'paypalEmail',
        'take': 'take',
        'created_at': 'createdAt',
        'created_at_time': 'createdAtTime',
        'updated_at': 'updatedAt',
        'updated_at_time': 'updatedAtTime'
    }

    # Every field is required: each setter raises ValueError on None.
    id = _required_property('id')
    customer_id = _required_property('customer_id')
    is_robot = _required_property('is_robot')
    name = _required_property('name')
    paypal_email = _required_property('paypal_email')
    take = _required_property('take')
    created_at = _required_property('created_at')
    created_at_time = _required_property('created_at_time')
    updated_at = _required_property('updated_at')
    updated_at_time = _required_property('updated_at_time')

    def __init__(self, id=None, customer_id=None, is_robot=None, name=None, paypal_email=None, take=None, created_at=None, created_at_time=None, updated_at=None, updated_at_time=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItemMerchant - a model defined in Swagger"""  # noqa: E501
        # Initialize backing storage, then route every argument through its
        # validating setter (which rejects the None defaults).
        for field in self.swagger_types:
            setattr(self, '_' + field, None)
        self.discriminator = None
        self.id = id
        self.customer_id = customer_id
        self.is_robot = is_robot
        self.name = name
        self.paypal_email = paypal_email
        self.take = take
        self.created_at = created_at
        self.created_at_time = created_at_time
        self.updated_at = updated_at
        self.updated_at_time = updated_at_time

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert one level of nested models inside list values.
                result[attr] = [item.to_dict() if hasattr(item, 'to_dict') else item
                                for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert one level of nested models inside dict values.
                result[attr] = {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, PortfolioIdDelResponsePortfolioItemMerchant)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
/sdk/python/lib/io_stockx/models/portfolio_request_portfolio_item.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


def _optional_property(attr):
    """Build a pass-through property backed by ``self._<attr>``.

    The generated setters performed no validation, so plain
    getattr/setattr pairs are behaviorally equivalent.
    """
    storage = '_' + attr

    def _get(self):
        return getattr(self, storage)

    def _set(self, value):
        setattr(self, storage, value)

    return property(_get, _set)


class PortfolioRequestPortfolioItem(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'amount': 'str',
        'expires_at': 'str',
        'matched_with_date': 'str',
        'condition': 'str',
        'action': 'int',
        'sku_uuid': 'str'
    }

    attribute_map = {
        'amount': 'amount',
        'expires_at': 'expiresAt',
        'matched_with_date': 'matchedWithDate',
        'condition': 'condition',
        'action': 'action',
        'sku_uuid': 'skuUuid'
    }

    # All fields are optional and unvalidated.
    amount = _optional_property('amount')
    expires_at = _optional_property('expires_at')
    matched_with_date = _optional_property('matched_with_date')
    condition = _optional_property('condition')
    action = _optional_property('action')
    sku_uuid = _optional_property('sku_uuid')

    def __init__(self, amount=None, expires_at=None, matched_with_date=None, condition=None, action=None, sku_uuid=None):  # noqa: E501
        """PortfolioRequestPortfolioItem - a model defined in Swagger"""  # noqa: E501
        # Backing storage defaults to None; only caller-supplied (non-None)
        # arguments are applied through the properties.
        for field in self.swagger_types:
            setattr(self, '_' + field, None)
        self.discriminator = None
        for field, value in (
            ('amount', amount),
            ('expires_at', expires_at),
            ('matched_with_date', matched_with_date),
            ('condition', condition),
            ('action', action),
            ('sku_uuid', sku_uuid),
        ):
            if value is not None:
                setattr(self, field, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Convert one level of nested models inside list values.
                result[attr] = [item.to_dict() if hasattr(item, 'to_dict') else item
                                for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Convert one level of nested models inside dict values.
                result[attr] = {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, PortfolioRequestPortfolioItem)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
/sdk/python/lib/io_stockx/models/product_info_product_attributes.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401


class ProductInfoProductAttributes(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'product_category': 'str',
        'url_key': 'str',
        'slug': 'str',
        'brand': 'str',
        'ticker': 'str',
        'style_id': 'str',
        'model': 'str',
        'name': 'str',
        'title': 'str',
        'size_locale': 'str',
        'size_title': 'str',
        'size_descriptor': 'str',
        'size_all_descriptor': 'str',
        'gender': 'str',
        'condition': 'str',
        'minimum_bid': 'int',
        'uniq_bids': 'bool',
        'primary_category': 'str',
        'secondary_category': 'str'
    }

    attribute_map = {
        'product_category': 'product_category',
        'url_key': 'url_key',
        'slug': 'slug',
        'brand': 'brand',
        'ticker': 'ticker',
        'style_id': 'style_id',
        'model': 'model',
        'name': 'name',
        'title': 'title',
        'size_locale': 'size_locale',
        'size_title': 'size_title',
        'size_descriptor': 'size_descriptor',
        'size_all_descriptor': 'size_all_descriptor',
        'gender': 'gender',
        'condition': 'condition',
        'minimum_bid': 'minimum_bid',
        'uniq_bids': 'uniq_bids',
        'primary_category': 'primary_category',
        'secondary_category': 'secondary_category'
    }

    def __init__(self, product_category=None, url_key=None, slug=None, brand=None, ticker=None, style_id=None, model=None, name=None, title=None, size_locale=None, size_title=None, size_descriptor=None, size_all_descriptor=None, gender=None, condition=None, minimum_bid=None, uniq_bids=None, primary_category=None, secondary_category=None):  # noqa: E501
        """ProductInfoProductAttributes - a model defined in Swagger"""  # noqa: E501
        # Snapshot the constructor arguments before creating any new locals.
        provided = dict(locals())
        provided.pop('self')

        # Every swagger attribute gets a None backing field up front so that
        # __eq__ / to_dict always see the same set of instance attributes,
        # exactly as the fully expanded generated __init__ did.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        # Only non-None arguments are applied, mirroring the generated
        # per-attribute `if x is not None:` assignments.
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested swagger models (anything exposing a
        # to_dict method), lists of models, and dicts of models into plain
        # Python values; scalars are copied through unchanged.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ProductInfoProductAttributes):
            return False

        # Equality is structural: every backing field must match.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _passthrough_property(attr_name):
    """Build a property that proxies the '_<attr_name>' backing field.

    The generated getters/setters performed no validation, so the several
    hundred lines of per-attribute boilerplate are replaced with these
    behaviorally identical generated properties.
    """
    field = '_' + attr_name

    def _get(self):
        return getattr(self, field)

    def _set(self, value):
        setattr(self, field, value)

    doc = "The %s of this ProductInfoProductAttributes." % attr_name
    return property(_get, _set, doc=doc)


for _attr in ProductInfoProductAttributes.swagger_types:
    setattr(ProductInfoProductAttributes, _attr, _passthrough_property(_attr))
del _attr
/sdk/python/lib/io_stockx/models/search_hit.py
# coding: utf-8

"""
    StockX API

    PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information.  # noqa: E501

    OpenAPI spec version: 1.0.0

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

from io_stockx.models.search_hit_media import SearchHitMedia  # noqa: F401,E501
from io_stockx.models.search_hit_searchable_traits import SearchHitSearchableTraits  # noqa: F401,E501


class SearchHit(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'brand': 'str',
        'thumbnail_url': 'str',
        'media': 'SearchHitMedia',
        'url': 'str',
        'release_date': 'str',
        'categories': 'list[str]',
        'product_category': 'str',
        'ticker_symbol': 'str',
        'style_id': 'str',
        'make': 'str',
        'model': 'str',
        'short_description': 'str',
        'gender': 'str',
        'colorway': 'str',
        'price': 'int',
        'description': 'str',
        'highest_bid': 'str',
        'total_dollars': 'str',
        'lowest_ask': 'str',
        'last_sale': 'str',
        'sales_last_72': 'int',
        'deadstock_sold': 'int',
        'quality_bid': 'int',
        'active': 'int',
        'new_release': 'str',
        'searchable_traits': 'SearchHitSearchableTraits',
        'object_id': 'str',
        'annual_high': 'str',
        'annual_low': 'str',
        'deadstock_range_low': 'str',
        'deadstock_range_high': 'str',
        'average_deadstock_price': 'str',
        'change_value': 'str'
    }

    # NOTE: every JSON key matches the attribute name except 'object_id',
    # which serializes as the camel-cased 'objectID'.
    attribute_map = {
        'name': 'name',
        'brand': 'brand',
        'thumbnail_url': 'thumbnail_url',
        'media': 'media',
        'url': 'url',
        'release_date': 'release_date',
        'categories': 'categories',
        'product_category': 'product_category',
        'ticker_symbol': 'ticker_symbol',
        'style_id': 'style_id',
        'make': 'make',
        'model': 'model',
        'short_description': 'short_description',
        'gender': 'gender',
        'colorway': 'colorway',
        'price': 'price',
        'description': 'description',
        'highest_bid': 'highest_bid',
        'total_dollars': 'total_dollars',
        'lowest_ask': 'lowest_ask',
        'last_sale': 'last_sale',
        'sales_last_72': 'sales_last_72',
        'deadstock_sold': 'deadstock_sold',
        'quality_bid': 'quality_bid',
        'active': 'active',
        'new_release': 'new_release',
        'searchable_traits': 'searchable_traits',
        'object_id': 'objectID',
        'annual_high': 'annual_high',
        'annual_low': 'annual_low',
        'deadstock_range_low': 'deadstock_range_low',
        'deadstock_range_high': 'deadstock_range_high',
        'average_deadstock_price': 'average_deadstock_price',
        'change_value': 'change_value'
    }

    def __init__(self, name=None, brand=None, thumbnail_url=None, media=None, url=None, release_date=None, categories=None, product_category=None, ticker_symbol=None, style_id=None, make=None, model=None, short_description=None, gender=None, colorway=None, price=None, description=None, highest_bid=None, total_dollars=None, lowest_ask=None, last_sale=None, sales_last_72=None, deadstock_sold=None, quality_bid=None, active=None, new_release=None, searchable_traits=None, object_id=None, annual_high=None, annual_low=None, deadstock_range_low=None, deadstock_range_high=None, average_deadstock_price=None, change_value=None):  # noqa: E501
        """SearchHit - a model defined in Swagger"""  # noqa: E501
        # Snapshot the constructor arguments before creating any new locals.
        provided = dict(locals())
        provided.pop('self')

        # Every swagger attribute gets a None backing field up front so that
        # __eq__ / to_dict always see the same set of instance attributes,
        # exactly as the fully expanded generated __init__ did.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None

        # Only non-None arguments are applied, mirroring the generated
        # per-attribute `if x is not None:` assignments.
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested swagger models (anything exposing a
        # to_dict method), lists of models, and dicts of models into plain
        # Python values; scalars are copied through unchanged.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SearchHit):
            return False

        # Equality is structural: every backing field must match.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _passthrough_property(attr_name):
    """Build a property that proxies the '_<attr_name>' backing field.

    The generated getters/setters performed no validation, so the several
    hundred lines of per-attribute boilerplate are replaced with these
    behaviorally identical generated properties.
    """
    field = '_' + attr_name

    def _get(self):
        return getattr(self, field)

    def _set(self, value):
        setattr(self, field, value)

    doc = "The %s of this SearchHit." % attr_name
    return property(_get, _set, doc=doc)


for _attr in SearchHit.swagger_types:
    setattr(SearchHit, _attr, _passthrough_property(_attr))
del _attr
/sdk/python/lib/test/test_stock_x_api.py
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import io_stockx from io_stockx.api.stock_x_api import StockXApi # noqa: E501 from io_stockx.rest import ApiException class TestStockXApi(unittest.TestCase): """StockXApi unit test stubs""" def setUp(self): self.api = io_stockx.api.stock_x_api.StockXApi() # noqa: E501 def tearDown(self): pass def test_delete_portfolio(self): """Test case for delete_portfolio Deletes a portfolio item from the market with the specified id. # noqa: E501 """ pass def test_delete_webhook(self): """Test case for delete_webhook """ pass def test_get_open_orders(self): """Test case for get_open_orders """ pass def test_get_portfolio(self): """Test case for get_portfolio Returns a market portfolio identified by request parameters. # noqa: E501 """ pass def test_get_portfolio_item(self): """Test case for get_portfolio_item """ pass def test_get_product_by_id(self): """Test case for get_product_by_id """ pass def test_get_product_market_data(self): """Test case for get_product_market_data Provides historical market data for a given product. # noqa: E501 """ pass def test_get_subscriptions(self): """Test case for get_subscriptions """ pass def test_get_webhook(self): """Test case for get_webhook """ pass def test_get_webhooks(self): """Test case for get_webhooks """ pass def test_login(self): """Test case for login Attempts to log the user in with a username and password. # noqa: E501 """ pass def test_lookup_product(self): """Test case for lookup_product """ pass def test_new_portfolio_ask(self): """Test case for new_portfolio_ask Creates a new seller ask on the market for a given product. 
# noqa: E501 """ pass def test_new_portfolio_bid(self): """Test case for new_portfolio_bid Creates a new buyer bid on the market for a given product. # noqa: E501 """ pass def test_post_webhooks(self): """Test case for post_webhooks """ pass def test_search(self): """Test case for search Searches for products by keyword. # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
/sdk/python/src/example_constants.py
from __future__ import print_function import time import io_stockx from io_stockx.rest import ApiException from pprint import pprint class ExampleConstants: AWS_API_KEY = "<API Key>" STOCKX_USERNAME = "<StockX Username>" STOCKX_PASSWORD = "<StockX Password>" DEMO_PRODUCT_ID = "air-jordan-1-retro-high-off-white-chicago" DEMO_CUSTOMER_ID = "1471698" ENABLE_DEBUG = True JWT_HEADER = "Jwt-Authorization"
/sdk/python/src/login.py
from __future__ import print_function import time import io_stockx from example_constants import ExampleConstants from io_stockx.rest import ApiException from pprint import pprint # Configure API key authorization: api_key configuration = io_stockx.Configuration() configuration.host = "https://gateway.stockx.com/stage" configuration.api_key['x-api-key'] = ExampleConstants.AWS_API_KEY # create an instance of the API class stockx = io_stockx.StockXApi(io_stockx.ApiClient(configuration)) login = io_stockx.LoginRequest(email=ExampleConstants.STOCKX_USERNAME, password=ExampleConstants.STOCKX_PASSWORD) try: # Attempts to log the user in with a username and password. api_response = stockx.login(login) pprint(api_response) except ApiException as e: print("Exception when calling StockXApi->login: %s\n" % e)
/sdk/python/src/place_new_lowest_ask_example.py
"""Example: log in, find a product, and place a new lowest ask for it."""
from __future__ import print_function
import time
import io_stockx
from example_constants import ExampleConstants
from io_stockx.rest import ApiException
from pprint import pprint

# Configure API key authorization: api_key
configuration = io_stockx.Configuration()
configuration.host = "https://gateway.stockx.com/stage"
configuration.api_key['x-api-key'] = ExampleConstants.AWS_API_KEY

# create an instance of the API class
stockx = io_stockx.StockXApi(io_stockx.ApiClient(configuration))

login = io_stockx.LoginRequest(email=ExampleConstants.STOCKX_USERNAME,
                               password=ExampleConstants.STOCKX_PASSWORD)

try:
    # Attempts to log the user in with a username and password.
    api_response = stockx.login_with_http_info(login)

    # Get the customer object after login
    customer = api_response[0]

    # Get the login's assigned jwt token
    jwt_token = api_response[2]['Jwt-Authorization']

    # Use the jwt token to authenticate future requests
    stockx.api_client.set_default_header('jwt-authorization', jwt_token)

    # Search for a type of product
    search_result = stockx.search('Jordan Retro Black Cat')

    first_hit = search_result.hits[0]
    style_id = first_hit.style_id

    # Lookup the first product returned from the search
    product = stockx.lookup_product(identifier=style_id, size='11')

    # Get the current market data for the product (highest bid info, etc.)
    attributes = product.data[0].attributes
    id = product.data[0].id
    uuid = attributes.product_uuid

    # Get the product market data
    market_data = stockx.get_product_market_data(id, sku=uuid)

    # Get the lowest ask for the product and decrement it: to *become* the
    # new lowest ask, the new ask must be priced below the current lowest.
    # BUG FIX: this previously did `lowest_ask += 1`, which places an ask
    # ABOVE the market low — contradicting both the original comment
    # ("decrement it") and the purpose of this example.
    lowest_ask = market_data.market.lowest_ask
    lowest_ask -= 1

    # Create a portfolio item request carrying the new ask amount.
    item = io_stockx.PortfolioRequestPortfolioItem()
    item.amount = lowest_ask
    # TODO(review): sku_uuid is hard-coded; presumably it should come from
    # the looked-up product — verify before reusing with other products.
    item.sku_uuid = "bae25b67-a721-4f57-ad5a-79973c7d0a5c"
    item.matched_with_date = "2018-12-12T05:00:00+0000"
    item.expires_at = "2018-12-12T12:39:07+00:00"

    request = io_stockx.PortfolioRequest()
    request.portfolio_item = item
    request.customer = customer
    request.timezone = "America/Detroit"

    # Submit the ask
    ask_resp = stockx.new_portfolio_ask(request)

    pprint(ask_resp)
except ApiException as e:
    print("Exception when calling StockXApi->new_portfolio_ask: %s\n" % e)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
jlamonade/splitteroni
refs/heads/master
{"/splitter/urls.py": ["/splitter/views.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/tests.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"], "/splitter/models.py": ["/splitter/utils.py"]}
└── ├── config │ └── settings.py ├── pages │ └── tests.py ├── splitter │ ├── admin.py │ ├── apps.py │ ├── forms.py │ ├── migrations │ │ ├── 0001_initial.py │ │ ├── 0002_auto_20201007_2310.py │ │ ├── 0003_auto_20201007_2339.py │ │ ├── 0004_auto_20201008_2206.py │ │ ├── 0005_merge_20201009_1438.py │ │ ├── 0006_auto_20201009_1603.py │ │ ├── 0007_auto_20201009_1606.py │ │ ├── 0009_merge_20201012_2025.py │ │ ├── 0010_bill_tax_percent.py │ │ ├── 0011_bill_tip_percent.py │ │ └── 0012_bill_session.py │ ├── mixins.py │ ├── models.py │ ├── tests.py │ ├── urls.py │ ├── utils.py │ └── views.py └── users └── tests.py
/config/settings.py
""" Django settings for config project. Generated by 'django-admin startproject' using Django 3.1.1. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path from environs import Env env = Env() env.read_env() # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = env("DJANGO_SECRET_KEY") # SECURITY WARNING: don't run with debug turned on in production! DEBUG = env.bool("DJANGO_DEBUG", default=False) ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=[]) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'whitenoise.runserver_nostatic', 'django.contrib.staticfiles', 'django.contrib.sites', # Third party apps 'crispy_forms', 'allauth', 'allauth.account', 'debug_toolbar', # My apps 'users', 'pages', 'splitter', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ] # # Cache settings # CACHE_MIDDLEWARE_ALIAS = 'default' # CACHE_MIDDLEWARE_SECONDS = 604800 # CACHE_MIDDLEWARE_KEY_PREFIX = '' ROOT_URLCONF = 'config.urls' TEMPLATES = [ { 'BACKEND': 
'django.template.backends.django.DjangoTemplates', 'DIRS': [str(BASE_DIR.joinpath('templates'))], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'config.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': env.dj_db_url( "DATABASE_URL", default="postgres://postgres@db/postgres") } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ # Static file settings STATIC_URL = '/static/' STATICFILES_DIRS = (str(BASE_DIR.joinpath('static')),) STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles')) STATICFILES_FINDERS = [ "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", ] STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage' AUTH_USER_MODEL = 'users.CustomUser' # Crispy settings CRISPY_TEMPLATE_PACK = 'bootstrap4' # django-allauth config LOGIN_REDIRECT_URL = 'home' ACCOUNT_LOGOUT_REDIRECT_URL = 'home' SITE_ID = 1 AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) 
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend') ACCOUNT_USERNAME_REQUIRED = False ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_AUTHENTICATION_METHOD = "email" ACCOUNT_UNIQUE_EMAIL = True ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False # Email settings DEFAULT_FROM_EMAIL = 'lamgoesbam@gmail.com' EMAIL_HOST = 'smtp.sendgrid.net' EMAIL_HOST_USER = 'apikey' EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default='') EMAIL_PORT = 587 EMAIL_USE_TLS = True # django-debug-toolbar import socket hostname, _, ips = socket.gethostbyname_ex(socket.gethostname()) INTERNAL_IPS = [ip[:-1] + "1" for ip in ips] # Security settings SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True) SECURE_HSTS_SECONDS = env.int("DJANGO_SECURE_HSTS_SECONDS", default=2592000) SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True) SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True) SESSION_COOKIE_SECURE = env.bool("DJANGO_SESSION_COOKIE_SECURE", default=True) CSRF_COOKIE_SECURE = env.bool("DJANGO_CSRF_COOKIE_SECURE", default=True) SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
/pages/tests.py
from django.test import TestCase from django.urls import reverse, resolve from django.contrib.auth import get_user_model from .views import HomePageView # Create your tests here. class HomepageTests(TestCase): def setUp(self): url = reverse('home') self.response = self.client.get(url) self.user = get_user_model().objects.create_user( username='testuser', email='testuser@email.com', password='testpass', ) def test_homepage_status_code(self): self.assertEqual(self.response.status_code, 200) def test_homepage_template(self): self.assertTemplateUsed(self.response, 'home.html') def test_homepage_contains_correct_html_while_logged_out(self): self.assertContains(self.response, 'Create a new split. Log in or sign up to save your splits.') self.assertContains(self.response, 'Sign up') def test_homepage_contains_correct_html_while_logged_in(self): self.client.login(email='testuser@email.com', password='testpass') self.assertContains(self.response, 'Create a new split.') def test_homepage_does_not_contain_incorrect_html(self): self.assertNotContains(self.response, 'Should not contain this') def test_homepage_url_resolves_homepageview(self): view = resolve('/') self.assertEqual( view.func.__name__, HomePageView.as_view().__name__ )
/splitter/admin.py
from django.contrib import admin from .models import Bill, Person, Item # Register your models here. admin.site.register(Bill) admin.site.register(Person) admin.site.register(Item)
/splitter/apps.py
from django.apps import AppConfig class SplitterConfig(AppConfig): name = 'splitter'
/splitter/forms.py
from django.forms import forms, ModelForm from django.utils.translation import gettext_lazy as _ from .models import Bill class BillCreateForm(ModelForm): class Meta: model = Bill fields = ('title', 'tax_percent', 'tip_percent',) labels = { 'title': _('Name'), } help_texts = { 'title': _('The current date and time will be used if name field is empty.'), 'tax_percent': _('Please enter a percentage value. You can leave this blank and change it later.'), 'tip_percent': _('Please enter a percentage value. You can leave this blank and change it later.'), } error_messages = { 'title': { 'max_length': _("Name is too long."), }, 'tax_percent': { 'max_digits': _("Too many digits.") }, 'tip_percent': { 'max_digits': _("Too many digits.") } } class BillUpdateForm(ModelForm): class Meta: model = Bill fields = ('title',) labels = { 'title': _('Name'), } class BillUpdateTaxPercentForm(ModelForm): # def __init__(self, *args, **kwargs): # initial = kwargs.get('initial', {}) # initial['tax'] = 0 # kwargs['initial'] = initial # super(BillUpdateTaxPercentForm, self).__init__(*args, **kwargs) class Meta: model = Bill fields = ('tax_percent',) help_texts = { 'tax_percent': _('Please enter a percent(%) amount.') } class BillUpdateTaxAmountForm(ModelForm): class Meta: model = Bill fields = ('tax',) help_texts = { 'tax': _('Please enter a currency amount.') } class BillUpdateTipForm(ModelForm): class Meta: model = Bill fields = ('tip',) labels = { 'tip': _('Tip/Service Charge'), } help_texts = { 'tip': _('Please enter currency amount.') } class BillUpdateTipPercentForm(ModelForm): class Meta: model = Bill fields = ('tip_percent',) labels = { 'tip_percent': _('Tip/Service Charge Percent'), } help_texts = { 'tip': _('Please enter a percent(%) amount.') }
/splitter/migrations/0001_initial.py
# Generated by Django 3.1.2 on 2020-10-08 02:57 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Bill', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('title', models.CharField(blank=True, max_length=50, null=True)), ('date_created', models.DateTimeField(auto_now_add=True)), ('tip', models.DecimalField(blank=True, decimal_places=2, max_digits=15)), ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=15)), ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Person', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('name', models.CharField(max_length=20)), ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='people', to='splitter.bill')), ], options={ 'verbose_name_plural': 'people', }, ), migrations.CreateModel( name='Item', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('title', models.CharField(max_length=50)), ('price', models.DecimalField(decimal_places=2, max_digits=15)), ('shared', models.BooleanField(default=False)), ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='splitter.bill')), ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='splitter.person')), ], ), ]
/splitter/migrations/0002_auto_20201007_2310.py
# Generated by Django 3.1.2 on 2020-10-08 03:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0001_initial'), ] operations = [ migrations.AlterField( model_name='item', name='title', field=models.CharField(blank=True, max_length=50, null=True), ), migrations.AlterField( model_name='person', name='name', field=models.CharField(max_length=30), ), ]
/splitter/migrations/0003_auto_20201007_2339.py
# Generated by Django 3.1.2 on 2020-10-08 03:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0002_auto_20201007_2310'), ] operations = [ migrations.AlterField( model_name='bill', name='tax', field=models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True), ), migrations.AlterField( model_name='bill', name='tip', field=models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True), ), ]
/splitter/migrations/0004_auto_20201008_2206.py
# Generated by Django 3.1.2 on 2020-10-09 02:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0003_auto_20201007_2339'), ] operations = [ migrations.AlterField( model_name='bill', name='tax', field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=15, null=True), ), migrations.AlterField( model_name='bill', name='tip', field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=15, null=True), ), ]
/splitter/migrations/0005_merge_20201009_1438.py
# Generated by Django 3.1.2 on 2020-10-09 14:38 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('splitter', '0004_auto_20201008_2206'), ('splitter', '0004_auto_20201009_1430'), ] operations = [ ]
/splitter/migrations/0006_auto_20201009_1603.py
# Generated by Django 3.1.2 on 2020-10-09 16:03 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0005_merge_20201009_1438'), ] operations = [ migrations.AddIndex( model_name='bill', index=models.Index(fields=['id'], name='id_index'), ), ]
/splitter/migrations/0007_auto_20201009_1606.py
# Generated by Django 3.1.2 on 2020-10-09 16:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0006_auto_20201009_1603'), ] operations = [ migrations.AddIndex( model_name='item', index=models.Index(fields=['id'], name='item_id_index'), ), migrations.AddIndex( model_name='person', index=models.Index(fields=['id'], name='person_id_index'), ), ]
/splitter/migrations/0009_merge_20201012_2025.py
# Generated by Django 3.1.2 on 2020-10-12 20:25 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('splitter', '0008_auto_20201011_1907'), ('splitter', '0008_auto_20201011_0301'), ] operations = [ ]
/splitter/migrations/0010_bill_tax_percent.py
# Generated by Django 3.1.2 on 2020-10-12 14:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0009_merge_20201012_2025'), ] operations = [ migrations.AddField( model_name='bill', name='tax_percent', field=models.DecimalField(blank=True, decimal_places=5, max_digits=10, null=True), ), ]
/splitter/migrations/0011_bill_tip_percent.py
# Generated by Django 3.1.2 on 2020-10-15 04:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0010_bill_tax_percent'), ] operations = [ migrations.AddField( model_name='bill', name='tip_percent', field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, null=True), ), ]
/splitter/migrations/0012_bill_session.py
# Generated by Django 3.1.2 on 2020-10-16 21:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('splitter', '0011_bill_tip_percent'), ] operations = [ migrations.AddField( model_name='bill', name='session', field=models.CharField(blank=True, max_length=40, null=True), ), ]
/splitter/mixins.py
class BillUpdateViewMixin(object): def form_valid(self, form): bill = get_object_or_404(Bill, id=self.kwargs['pk']) form.instance.bill = bill return super().form_valid(form)
/splitter/models.py
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add

# Create your models here.


class Bill(models.Model):
    """A bill being split between people, with optional tip and tax.

    Tip and tax can each be stored as an absolute amount or as a percent;
    the percent takes precedence in the getters below.
    """

    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    # Bills created anonymously are tied to the visitor's session key.
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)

    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]

    def __str__(self):
        # Fall back to the creation timestamp when no title was given.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()

    def get_tax_amount(self):
        """Return the tax quantized to cents, preferring tax_percent.

        Returns int 0 when neither tax_percent nor tax is set.
        NOTE(review): when tax_percent is set this getter also *persists*
        the computed amount back to the row (extra fetch + save inside a
        read path) — confirm that side effect is intended.
        """
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0

    def get_tip_amount(self):
        """Return the tip quantized to cents, preferring tip_percent.

        The tip percentage is applied to subtotal *plus* tax.
        Returns int 0 when neither tip_percent nor tip is set.
        NOTE(review): same write-inside-getter side effect as
        get_tax_amount() — confirm intended.
        """
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0

    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)

    def get_order_subtotal(self):
        """Sum of all item prices on this bill (no tax/tip)."""
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)

    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)

    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])


class Person(models.Model):
    """A participant on a Bill who can have items assigned to them."""

    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')

    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]

    def __str__(self):
        return self.name.title()

    def get_shared_items_split(self):
        # Returns the amount every person owes inside the shared items including tax and tip
        # NOTE(review): divides by the bill's person count — this raises
        # ZeroDivisionError if the bill has no people; confirm callers only
        # invoke it when at least one Person exists.
        total = _check_tip_tax_then_add(self.bill)
        person_count = self.bill.people.all().count()
        items = self.bill.items.filter(shared=True)
        for item in items:
            total += Decimal(item.price)
        split_amount = Decimal(total / person_count)
        return Decimal(split_amount)

    def get_person_total(self):
        # Returns the sum of the person's items and their share of the shared items total
        total = 0
        items = Item.objects.filter(person=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total + self.get_shared_items_split()).quantize(Decimal('.01'))

    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])


class Item(models.Model):
    """A line item on a Bill; shared items are split across all people."""

    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    # Unassigned (person=None) items are expected to be shared ones.
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)

    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
/splitter/tests.py
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.contrib.auth import get_user_model
from decimal import Decimal
from .models import Bill, Person, Item

# Create your tests here.


class SplitterTests(TestCase):
    """Tests for the splitter models and views.

    NOTE(review): the detail-view responses are fetched once in setUp();
    tests that mutate model state afterwards (see the calculate_tax/tip
    tests) assert against these already-rendered pages — confirm that is
    the intended behavior.
    """

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username='testuser',
            email='testuser@email.com',
            password='testpass',
        )
        # bill: absolute tip/tax amounts.
        self.bill = Bill.objects.create(
            title='testbill',
            tip=12.00,
            tax=13.00,
            owner=self.user,
        )
        self.person = Person.objects.create(
            name='testperson',
            bill=self.bill
        )
        self.item = Item.objects.create(
            title='testitem',
            price=14.00,
            person=self.person,
            bill=self.bill,
        )
        self.shared_item = Item.objects.create(
            title='testshareditem',
            price=15.00,
            bill=self.bill,
            shared=True,
        )
        # Testing tax percent/amount
        self.bill_two = Bill.objects.create(
            title='testbill2',
            tip_percent=15,
            tax_percent=8.875,
            owner=self.user,
        )
        self.item_two = Item.objects.create(
            title='testitem2',
            price=14.00,
            bill=self.bill_two,
            shared=True,
        )
        # Expected totals, derived from the fixtures above.
        self.bill_total = self.item.price + self.shared_item.price + self.bill.tax + self.bill.tip
        self.shared_item_total = self.bill.tip + self.bill.tax + self.shared_item.price
        # Responses captured once, pre-mutation (see class NOTE above).
        self.bill_detail_response = self.client.get(self.bill.get_absolute_url())
        self.bill_two_response = self.client.get(self.bill_two.get_absolute_url())

    def test_bill_object(self):
        self.assertEqual(self.bill.title, 'testbill')
        self.assertEqual(self.bill.tip, 12.00)
        self.assertEqual(self.bill.tax, 13.00)
        self.assertEqual(self.bill.owner, self.user)

    def test_bill_list_view_for_logged_in_user(self):
        self.client.login(email='testuser@email.com', password='testpass')
        response = self.client.get(reverse('bill-list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'testbill'.title())
        self.assertTemplateUsed(response, 'splitter/bill_list.html')

    def test_bill_list_view_for_logged_out_users(self):
        response = self.client.get(reverse('bill-list'))
        self.assertEqual(response.status_code, 200)

    def test_bill_detail_view(self):
        no_response = self.client.get('/bill/12345/')
        self.assertEqual(self.bill_detail_response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(self.bill_detail_response, 'testbill'.title())
        self.assertContains(self.bill_detail_response, '12.00')
        self.assertContains(self.bill_detail_response, '13.00')
        self.assertContains(self.bill_detail_response, self.item.price)
        self.assertContains(self.bill_detail_response, self.shared_item.price)
        self.assertContains(self.bill_detail_response, self.bill_total)
        self.assertTemplateUsed(self.bill_detail_response, 'splitter/bill_detail.html')

    def test_person_object(self):
        self.assertEqual(self.person.name, 'testperson')
        self.assertEqual(self.person.bill, self.bill)

    def test_person_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testperson'.title())

    def test_item_object(self):
        self.assertEqual(self.item.title, 'testitem')
        self.assertEqual(self.item.price, 14.00)
        self.assertEqual(self.item.bill, self.bill)
        self.assertEqual(self.item.person, self.person)

    def test_item_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testitem')
        self.assertContains(self.bill_detail_response, 14.00)

    def test_shared_item_object(self):
        self.assertEqual(self.shared_item.title, 'testshareditem')
        self.assertEqual(self.shared_item.price, 15.00)
        self.assertEqual(self.shared_item.bill, self.bill)

    def test_shared_item_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testshareditem')
        self.assertContains(self.bill_detail_response, 15.00)

    def test_bill_model_methods(self):
        """Tests for Bill model methods."""
        # Bill.get_order_total()
        self.assertEqual(self.bill.get_order_grand_total(), self.bill_total)
        # Bill.get_shared_items_total()
        self.assertEqual(self.bill.get_shared_items_total(), self.shared_item.price)

    def test_person_model_methods(self):
        """Tests for Person model methods."""
        # Person.get_shared_items_split()
        self.assertEqual(self.person.get_shared_items_split(), self.shared_item_total)
        # Person.get_person_total()
        self.assertEqual(self.person.get_person_total(), self.bill.get_order_grand_total())

    def test_bill_calculate_tax(self):
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.get_tax_amount()))
        self.assertContains(self.bill_two_response, self.bill_two.tax_percent)
        # NOTE(review): this assignment happens AFTER bill_two_response was
        # rendered, so it cannot affect the assertion below — verify intent.
        self.bill_two.tax = 12.00
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.tax))

    def test_bill_calculate_tip(self):
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.get_tip_amount()))
        self.assertContains(self.bill_two_response, self.bill_two.tip_percent)
        # NOTE(review): same post-render mutation as in
        # test_bill_calculate_tax — verify intent.
        self.bill_two.tip = 12.00
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.tip))

    def test_bill_saves_session(self):
        self.client.session.create()
        self.bill_three = Bill.objects.create(
            title='testbill3',
            session=self.client.session.session_key,
        )
        self.assertEqual(self.bill_three.session, self.client.session.session_key)
/splitter/urls.py
from django.urls import path

from .views import (
    BillCreateView,
    BillDetailView,
    PersonCreateView,
    PersonDeleteView,
    BillListView,
    ItemCreateView,
    ItemDeleteView,
    SharedItemCreateView,
    BillUpdateView,
    BillUpdateTaxPercentView,
    BillUpdateTaxAmountView,
    BillUpdateTipAmountView,
    BillUpdateTipPercentView,
    BillDeleteView,
)

urlpatterns = [
    # Bill routes
    path('new/', BillCreateView.as_view(), name='bill-create'),
    path('<uuid:pk>/', BillDetailView.as_view(), name='bill-detail'),
    path('archive/', BillListView.as_view(), name='bill-list'),
    path('<uuid:pk>/update/', BillUpdateView.as_view(), name='bill-update'),
    path('<uuid:pk>/update-tax-percent/', BillUpdateTaxPercentView.as_view(), name='bill-update-tax-percent'),
    path('<uuid:pk>/update-tax-amount/', BillUpdateTaxAmountView.as_view(), name='bill-update-tax-amount'),
    # NOTE(review): this route's name is 'bill-update-tip' rather than
    # 'bill-update-tip-amount'; templates reverse() on it, so it is kept as-is.
    path('<uuid:pk>/update-tip-amount/', BillUpdateTipAmountView.as_view(), name='bill-update-tip'),
    path('<uuid:pk>/update-tip-percent/', BillUpdateTipPercentView.as_view(), name='bill-update-tip-percent'),
    path('<uuid:pk>/delete/', BillDeleteView.as_view(), name='bill-delete'),

    # Person routes
    path('<uuid:pk>/add-person/', PersonCreateView.as_view(), name='person-create'),
    path('person/<uuid:pk>/delete/', PersonDeleteView.as_view(), name='person-delete'),

    # Item routes
    path('<uuid:bill_id>/<uuid:person_id>/add-item/', ItemCreateView.as_view(), name='item-create'),
    path('<uuid:bill_id>/add-shared-item/', SharedItemCreateView.as_view(), name='shared-item-create'),
    path('item/<uuid:pk>/item-delete/', ItemDeleteView.as_view(), name='item-delete'),
]
/splitter/utils.py
from decimal import Decimal


def _check_tip_tax_then_add(self):
    """Return the bill's tip + tax as a ``Decimal``.

    A tip or tax that is null/zero (falsy) is skipped, so a bill with
    neither contributes nothing and the result is ``Decimal(0)``.
    """
    running_total = 0
    # Tip first, then tax — add each only when it is actually set.
    for amount in (self.get_tip_amount(), self.get_tax_amount()):
        if amount:
            running_total += amount
    return Decimal(running_total)
/splitter/views.py
from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.http import Http404
from decimal import Decimal
from .models import Bill, Person, Item
from .forms import (BillCreateForm, BillUpdateForm, BillUpdateTaxPercentForm,
                    BillUpdateTaxAmountForm, BillUpdateTipForm, BillUpdateTipPercentForm)
# from .mixins import BillUpdateViewMixin

# Create your views here.


class BillCreateView(CreateView):
    """Create a Bill, owned either by the logged-in user or by an anonymous session."""

    template_name = 'splitter/bill_create.html'
    form_class = BillCreateForm

    def form_valid(self, form):
        """Attach ownership before the bill is saved."""
        if self.request.user.is_authenticated:
            form.instance.owner = self.request.user
        else:
            # Anonymous visitor: force a session so the bill can be found again.
            self.request.session.create()
            form.instance.session = self.request.session.session_key
        return super().form_valid(form)


class BillDetailView(DetailView):
    """Show one bill with its people, shared items, and tax/tip percentages."""

    model = Bill
    template_name = 'splitter/bill_detail.html'
    context_object_name = 'bill'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        bill = self.object
        context['people'] = Person.objects.filter(bill=bill.id)
        context['shared_items'] = Item.objects.filter(bill=bill.id, shared=True)
        if bill.tax_percent:
            # Tax percentage displayed to three decimal places.
            context['tax_percentage'] = Decimal(bill.tax_percent).quantize(Decimal('0.001'))
        if bill.tip_percent:
            # Tip percentage displayed rounded to a whole number.
            context['tip_percentage'] = Decimal(bill.tip_percent.quantize(Decimal('0')))
        return context

    def get_object(self, queryset=None):
        """Return the bill only to its owner or to the anonymous session that created it."""
        bill = get_object_or_404(Bill, id=self.kwargs.get('pk'))
        owns_it = self.request.user.is_authenticated and self.request.user == bill.owner
        if owns_it or self.request.session.session_key == bill.session:
            return bill
        raise Http404


class PersonCreateView(CreateView):
    """Add a person to a bill."""

    model = Person
    template_name = 'splitter/person_create.html'
    fields = ('name',)

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        return super().form_valid(form)


class BillDeleteView(DeleteView):
    """Delete a bill and return to the archive list."""

    model = Bill
    template_name = 'splitter/bill_delete.html'

    def get_success_url(self):
        return reverse_lazy('bill-list')


class BillListView(ListView):
    """List bills belonging to the current user, or to the anonymous session."""

    template_name = 'splitter/bill_list.html'
    context_object_name = 'bills'

    def get_queryset(self):
        if self.request.user.is_authenticated:
            return Bill.objects.filter(owner=self.request.user).order_by('-date_created')
        if self.request.session.session_key:
            return Bill.objects.filter(session=self.request.session.session_key).order_by('-date_created')
        # No user and no session yet: nothing to list.
        return None


class PersonDeleteView(DeleteView):
    """Remove a person, then go back to their bill."""

    model = Person
    template_name = 'splitter/person_delete.html'

    def get_success_url(self):
        return reverse_lazy('bill-detail', args=[self.object.bill.id])


class ItemCreateView(CreateView):
    """Add an item to a bill, assigned to one person."""

    model = Item
    template_name = 'splitter/item_create.html'
    fields = ('title', 'price',)

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        form.instance.person = get_object_or_404(Person, id=self.kwargs['person_id'])
        return super().form_valid(form)


class SharedItemCreateView(CreateView):
    """Add an item shared by everyone on the bill."""

    model = Item
    template_name = "splitter/item_create.html"
    fields = ('title', 'price',)

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        form.instance.shared = True
        return super().form_valid(form)


class ItemDeleteView(DeleteView):
    """Delete an item, then go back to its bill."""

    model = Item
    template_name = 'splitter/item_delete.html'

    def get_success_url(self):
        return reverse_lazy('bill-detail', args=[self.object.bill.id])


class BillUpdateView(UpdateView):
    """Edit a bill's basic fields."""

    model = Bill
    template_name = 'splitter/bill_update.html'
    form_class = BillUpdateForm

    def form_valid(self, form):
        # NOTE(review): the instance being edited *is* this bill, so this
        # assignment looks redundant; kept to preserve existing behavior.
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        return super().form_valid(form)


class BillUpdateTaxPercentView(UpdateView):
    """Set tax as a percentage; clears any fixed tax amount."""

    model = Bill
    form_class = BillUpdateTaxPercentForm
    template_name = 'splitter/bill_update_tax_percent.html'

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.tax = None  # percent-based tax replaces the fixed amount
        return super().form_valid(form)


class BillUpdateTaxAmountView(UpdateView):
    """Set tax as a fixed amount; clears any tax percentage."""

    model = Bill
    form_class = BillUpdateTaxAmountForm
    template_name = 'splitter/bill_update_tax_amount.html'

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.tax_percent = None  # fixed amount replaces percent-based tax
        return super().form_valid(form)


class BillUpdateTipAmountView(UpdateView):
    """Set tip as a fixed amount; clears any tip percentage."""

    model = Bill
    form_class = BillUpdateTipForm
    template_name = 'splitter/bill_update_tip.html'

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.tip_percent = None  # fixed amount replaces percent-based tip
        return super().form_valid(form)


class BillUpdateTipPercentView(UpdateView):
    """Set tip as a percentage; clears any fixed tip amount."""

    model = Bill
    form_class = BillUpdateTipPercentForm
    template_name = 'splitter/bill_update_tip_percent.html'

    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.tip = None  # percent-based tip replaces the fixed amount
        return super().form_valid(form)
/users/tests.py
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse, resolve
from .forms import CustomUserCreationForm, CustomUserChangeForm

# Create your tests here.


class CustomUserTests(TestCase):
    """Model-level tests for the project's custom user model."""

    def test_create_user(self):
        User = get_user_model()
        # FIX: use create_user() (not objects.create()) so the password goes
        # through the manager and is hashed rather than stored verbatim.
        user = User.objects.create_user(
            username='test',
            email='test@email.com',
            password='test123',
        )
        self.assertEqual(user.username, 'test')
        self.assertEqual(user.email, 'test@email.com')
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)
        # The stored (hashed) password must verify against the raw one.
        self.assertTrue(user.check_password('test123'))

    def test_create_superuser(self):
        User = get_user_model()
        super_user = User.objects.create_superuser(
            username='superuser',
            email='superuser@email.com',
            password='super123',
        )
        self.assertEqual(super_user.username, 'superuser')
        self.assertEqual(super_user.email, 'superuser@email.com')
        self.assertTrue(super_user.is_active)
        self.assertTrue(super_user.is_staff)
        self.assertTrue(super_user.is_superuser)


class SignupPageTests(TestCase):
    """Tests for the allauth signup page and user creation."""

    username = 'testuser'
    email = 'testuser@email.com'

    def setUp(self):
        # 'account_signup' is provided by django-allauth.
        url = reverse('account_signup')
        self.response = self.client.get(url)

    def test_signup_template(self):
        self.assertEqual(self.response.status_code, 200)
        self.assertTemplateUsed(self.response, 'account/signup.html')
        self.assertContains(self.response, 'Sign up')
        self.assertNotContains(self.response, 'Should not contain this')

    def test_signup_form(self):
        new_user = get_user_model().objects.create_user(
            self.username, self.email
        )
        self.assertEqual(get_user_model().objects.all().count(), 1)
        self.assertEqual(
            get_user_model().objects.all()[0].username, 'testuser'
        )
        self.assertEqual(
            get_user_model().objects.all()[0].email, 'testuser@email.com'
        )
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
trineary/TradeTestingEngine
refs/heads/master
{"/TTE.py": ["/TradeTracking/TradeHistory.py"], "/TTEBootstrapTests/MonteCarloBootstrap.py": ["/TTEBootstrapTests/BootstrapCalcTools.py", "/TTEBootstrapTests/BootstrapABC.py"], "/TTEBootstrapTests/WhiteBootstrap.py": ["/TTEBootstrapTests/BootstrapCalcTools.py", "/TTEBootstrapTests/BootstrapABC.py"], "/TradeTracking/TradeHistory.py": ["/TradeTracking/TradeDetails.py"]}
└── ├── TTE.py ├── TTEBootstrapTests │ ├── BootstrapABC.py │ ├── BootstrapCalcTools.py │ ├── MonteCarloBootstrap.py │ └── WhiteBootstrap.py ├── TTEExampleCode.py ├── TTEPaperEx1.py ├── TTEPaperEx2.py ├── TradeTracking │ ├── TradeDetails.py │ └── TradeHistory.py └── __init__.py
/TTE.py
# -------------------------------------------------------------------------------------------------------------------- #
# Patrick Neary
# Date: 9/21/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# tte.py
#
# This file handles the interface to most of the code in this project.
#
# NOTE(review): this module is Python 2 (print statements, pandas .ix indexing) and
# depends on the discontinued yahoo_finance package — confirm runtime before porting.
# -------------------------------------------------------------------------------------------------------------------- #

# Import standard packages
import math
import pandas as pd
import datetime as dt
from matplotlib import pyplot
import yahoo_finance as yfinance

# Import my classes
from TradeTracking.TradeHistory import TradeTracking
from pytte.TTEBootstrapTests import WhiteBootstrap, MonteCarloBootstrap, TTEBootstrap


class TTE:
    """Facade tying together trade tracking, historical data, and the bootstrap significance tests."""

    # Bootstrap options that are available with this package
    BOOTSTRAP_TTE = 0
    BOOTSTRAP_MONTE_CARLO = 1
    BOOTSTRAP_WHITE = 2

    # Trading direction options
    CASH = 0
    LONG = 1
    SHORT = -1

    def __init__(self):
        # Trade tracking
        self._tt = TradeTracking(trackDailyPositions=True)
        self._trade_history = None
        # Bootstrap initializations
        self._bs_tte = None
        self._bs_mc = None
        self._bs_wh = None
        self._bs = None  # this is the currently selected bootstrap
        # Dataframe for trade data
        self._df = None
        self._column = None
        self._ticker = None
        return

    def get_hist_data(self, ticker, startdate, stopdate, column="Adj_Close"):
        """Download historical data for *ticker* and load it into the engine.

        Inputs:
          1. ticker    - ticker symbol of the desired equity, e.g. 'SPY'
          2. startdate - first date to collect data from, e.g. '2016-08-20'
          3. stopdate  - last date to collect data from, e.g. '2016-09-16'
          4. column    - dataframe column used for price information (default 'Adj_Close')
        Returns the dataframe so the caller can cycle through it to generate trade signals;
        the data is also loaded internally via load_hist_data().
        """
        #self._df = GetHistoricalStockData(ticker, startdate, stopdate)
        # Get the data from yahoo finance, reorder the data, and then put the data into a dataframe for easy use.
        yahoo = yfinance.Share(ticker)
        data = yahoo.get_historical(start_date=startdate, end_date=stopdate)
        # data comes in reversed order. Put it in ascending order.
        data = data[::-1]
        # Put the data into a dataframe
        df = pd.DataFrame(data=data)
        # Load historical data and initialize other values
        self.load_hist_data(ticker, df, column)
        return df

    def load_hist_data(self, ticker, hist_data, column="Adj_Close"):
        """Load an externally supplied historical data set (e.g. forex data).

        Inputs:
          1. hist_data - historical data in the format of a dataframe
          2. column    - dataframe column used for price information (default 'Adj_Close')
        """
        self._ticker = ticker
        self._df = hist_data
        self._tt.InitTickData(self._df)
        self._column = column
        self._trade_history = [0]*len(self._df[self._column])  # Make trade history the same length as the data
        pass

    def reset(self):
        '''
        reset - dataframe is left alone, but all other internal tracking is reset so system can run a new test
        :return:
        '''
        print "TODO: reset still needs to be implemented"
        pass

    def open_trade(self, index, direction):
        '''
        Open a trade at the given bar.

        :param index: index into the dataframe.
        :param direction: direction of the trade (CASH, LONG, or SHORT)
        :return: True on success, False if index is out of bounds
        '''
        # Make sure index is in a valid range
        if index < 0 or index > len(self._df[self._column]):
            print "open_trade error! index is out of bounds (%d)\n" %index
            return False
        openprice = self._df.ix[index][self._column]
        spread = 0.0
        timestamp = self._df.ix[index]['Date']
        self._tt.OpenTrade(self._ticker, openprice=openprice, spread=spread, direction=direction, timestamp=timestamp)
        return True

    def close_trade(self, index):
        """Close the currently open trade at the given bar; returns False if index is out of bounds."""
        # Make sure index is in a valid range
        if index < 0 or index > len(self._df[self._column]):
            print "close_trade error! index is out of bounds (%d)\n" %index
            return False
        closeprice = self._df.ix[index][self._column]
        timestamp = self._df.ix[index]['Date']
        self._tt.CloseTrade(closeprice=closeprice, timestamp=timestamp, direction=self.CASH)
        return True

    def select_bootstrap(self, selection):
        '''
        select_bootstrap
        Set the bootstrap to be used for all subsequent queries. This can be updated at any time to get
        information relevant to the specified bootstrap.
        :return: True on success, False for an unknown selection
        '''
        if selection == self.BOOTSTRAP_TTE:
            self._bs = TTEBootstrap.TTEBootstrap()
        elif selection == self.BOOTSTRAP_MONTE_CARLO:
            self._bs = MonteCarloBootstrap.MonteCarloBootstrap()
        elif selection == self.BOOTSTRAP_WHITE:
            self._bs = WhiteBootstrap.WhiteBootstrap()
        else:
            print "select_bootstrap error! selection was invaled (%d)\n" %(selection)
            print "Valid selections are the following: \n"
            print " BOOTSTRAP_TTE, BOOTSTRAP_MONTE_CARLO, BOOTSTRAP_WHITE\n\n"
            return False
        return True

    def get_pvalue(self, iterations=5000):
        """Run the currently selected bootstrap and return the p-value for the tracked rule's return."""
        # Calculate the total return based on what has been tracked in the trade tracker
        rule_percent_return = self._tt.GetPercentReturn()
        # Initialize the test
        self._bs.init_test(self._df, self._column, num_iterations=iterations)
        # Determine what the p-value is for this bootstrap method
        pvalue = self._bs.has_predictive_power(rule_percent_return)
        return pvalue

    def get_trade_stats(self):
        """Return the accumulated trade statistics as a printable string."""
        return self._tt.GetTradeStatsStr()

    def print_trade_stats(self):
        """Print the trade statistics to stdout."""
        print "\n", self._tt.GetTradeStatsStr()
        pass

    def print_trade_history(self):
        """Print the full trade history to stdout."""
        self._tt.PrintHistory()
        pass

    def plot_pdf(self):
        '''
        plot_pdf
        Display a plot showing the probability density function of returns calculated.
        :return:
        '''
        self._bs.plot_histogram()
        pass

    def plot_trades_equity(self):
        '''
        plot_trades_equity
        Generate a plot that shows the trades and the equity curve for the given dataframe
        :return:
        '''
        #print len(self.pairtimestmps), len(self.pairhistory), len(self.visualRewardHistory)
        pyplot.figure(1)
        #pyplot.subplot(211)
        pyplot.plot(self._df[self._column])
        #pyplot.subplot(212)
        #pyplot.plot(self.visualRewardHistory)
        #pyplot.subplot(313)
        #pyplot.plot(self.visualTradeHistory)
        #x1,x2,y1,y2 = pyplot.axis()
        #pyplot.axis((x1,x2,(y1-0.25), (y2+0.25)))
        pyplot.xticks( rotation= 45 )
        pyplot.show()
        pass

    def plot_all(self, title=None):
        """Plot the bootstrap histogram, the price series, and the position series in one 3-row figure."""
        #pyplot.xlabel('Smarts')
        #pyplot.ylabel('Probability')
        pyplot.figure(1)
        pyplot.subplot(311)
        pyplot.title(title)
        sample_means = self._bs.get_histogram_data()
        pyplot.hist(sample_means, bins=20)
        pyplot.grid(True)
        pyplot.subplot(312)
        pyplot.plot(self._df[self._column])
        pyplot.subplot(313)
        dates = self._df['Date'].tolist()
        x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in dates]
        pyplot.plot(self._df['Position'])
        #pyplot.plot(x, self._df['Position'])
        #pyplot.gcf().autofmt_xdate()
        pyplot.xticks( rotation= 45 )
        x1,x2,y1,y2 = pyplot.axis()
        pyplot.axis((x1,x2,(y1-0.25), (y2+0.25)))
        pyplot.show()
        pass


# --------------------------------------------------------------------------------------------------------------------
# Test functions
# --------------------------------------------------------------------------------------------------------------------

# Default function when the file is run
if __name__ == "__main__":
    # Functions to run if this file is executed
    print "Run default function for ", __file__
/TTEBootstrapTests/BootstrapABC.py
# -------------------------------------------------------------------------------------------------------------------- # # Patrick Neary # Date: 11/12/2016 # # Fin 5350 / Dr. Tyler J. Brough # Trade Testing Engine: # # BootstrapABC.py # # Abstract base class for all tests developed to evaluate rules. # # -------------------------------------------------------------------------------------------------------------------- # Import standard packages from abc import ABCMeta, abstractmethod class BootstrapABC(): """ Base test class for bootstrap tests. InitTest will initialize the bootstrap test with data that it needs and parameters needed to build the sampling distribution. HasPredictivePower will take a percent gain from a rule and determine what the predictive power is SaveOutput will generate output for the test.. maybe """ __metaclass__ = ABCMeta @abstractmethod def init_test(self): pass @abstractmethod def has_predictive_power(self): pass #@abstractmethod #def SaveOutput(self): #s pass
/TTEBootstrapTests/BootstrapCalcTools.py
# -------------------------------------------------------------------------------------------------------------------- #
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# BootstrapCalcTools.py
#
# This file contains tools common to the bootstrap processes.
#
# -------------------------------------------------------------------------------------------------------------------- #

# Import standard packages
import pandas
import math

# --------------------------------------------------------------------------------------------------------------------


def GetDailyReturns(df, colName):
    """Return a one-column DataFrame of daily log10 returns for df[colName].

    The return for day t is log10(price_t / price_{t-1}); the first day has
    no previous day, so its return is defined as 0.0.

    :param df: DataFrame holding the price series
    :param colName: column of df to read prices from
    :return: pandas.DataFrame with the returns in column 0
    """
    prev = None
    returns = []
    # FIX: Series.iteritems() was removed in pandas 2.0; .items() is the
    # long-standing equivalent. Also use "is None" instead of "== None".
    for index, rowVal in df[colName].items():
        if prev is None:
            dreturn = 0.0
        else:
            dreturn = math.log10(float(rowVal)/prev)
        prev = float(rowVal)
        returns.append(dreturn)
    return pandas.DataFrame(data=returns)


def GetMeanDailyReturn(df, colName):
    """Calculate the daily returns of df[colName] and their mean.

    :param df: DataFrame holding the price series
    :param colName: column of df to read prices from
    :return: (mean daily return, DataFrame containing the daily returns)
    """
    dailyReturns = GetDailyReturns(df, colName)
    meanDailyReturn = dailyReturns[0].mean()
    return meanDailyReturn, dailyReturns


def GetDetrendedReturns(df, col_name):
    """Return the daily returns of df[col_name] with their mean subtracted (zero-mean series)."""
    # Get the daily returns and the mean daily return
    meanDailyReturn, dailyreturns = GetMeanDailyReturn(df, col_name)
    # Detrend the daily returns by subtracting off the mean daily return
    detrended_returns = dailyreturns.apply(lambda x: x-meanDailyReturn)
    return detrended_returns


def GetPVal(sample_dist, rule_percent_return):
    '''
    Return the p-value of the rule's return against the sampling distribution.

    :param sample_dist: sample distribution, this is assumed to be a distribution around zero
    :param rule_percent_return: percent return of the trading rule
    :return: fraction of sample means >= the rule's return (1 - empirical CDF)
    '''
    lessThanCnt = 0
    for meanReturn in sample_dist:
        if meanReturn < rule_percent_return:
            lessThanCnt += 1
    percentage = lessThanCnt/float(len(sample_dist))
    pval = 1-percentage
    return pval
/TTEBootstrapTests/MonteCarloBootstrap.py
# -------------------------------------------------------------------------------------------------------------------- # # Patrick Neary # Date: 11/12/2016 # # Fin 5350 / Dr. Tyler J. Brough # Trade Testing Engine: # # kWhiteRealityCheck.py # # This file is an implementation of White's Reality Check for evaluating the significance of a trading rule's # predictive power. # # -------------------------------------------------------------------------------------------------------------------- # Import standard packages import random from matplotlib import pyplot as plt from BootstrapABC import BootstrapABC # Import my classes from BootstrapCalcTools import GetDailyReturns, GetMeanDailyReturn, GetDetrendedReturns, GetPVal # Global values for selecting different options # -------------------------------------------------------------------------------------------------------------------- class MonteCarloBootstrap(BootstrapABC): def __init__(self): self._sample_means = [] self._rules = [] pass def init_test(self, df, col_name, num_iterations=5000): """ init_test initializes the White Reality Check Bootstrap test :param df: dataframe containing data to bootstrap :param col_name: name of colume in data frame containing data :param daily_rules: list of rules applied to the time series in the data frame. 
rules take on (+1, -1) values :param num_iterations: number of iterations to build bootstrap sampling distribution :return: none """ self._df = df self._detrended_data = None self._col_name = col_name self._num_iterations = num_iterations datalen = len(self._df.index) #gain = float(self._df.at[datalen-1, col_name]) - float(self._df.at[0, col_name]) #dailyGain = gain/datalen pass def plot_histogram(self, bins=20): if len(self._sample_means) > 0: plt.hist(self._sample_means, bins=bins) plt.grid(True) plt.show() return def get_histogram_data(self): return self._sample_means def run_monte_carlo_round(self, detrended_data): # Run through one iteration of pairing daily rules with detrended returns. Calculate the average return # and return that value. # check length of detrended data and daily rules. They should be the same length. if len(detrended_data) != len(self._rules): print "Monte Carlo error! Detrended data and daily rules not the same length." return -1 # Get a copy of the detrended data detrended_copy = detrended_data[0].tolist() # Cycle through the data now total_val = 0 tradeDirection = 1 for index in xrange(0, len(detrended_copy)): index = random.randint(0, len(detrended_copy)-1) if tradeDirection == 1: tradeDirection = -1 else: tradeDirection = 1 total_val += tradeDirection * detrended_copy.pop(index) #print "total_val: ", total_val return total_val def has_predictive_power(self, rule_percent_return): # Get daily rules from the dataframe rules = self._df['Position'].tolist() #print "rules", rules # Set daily rules self._rules = rules # Get one-day market price changes # Detrend the data detrended_returns = GetDetrendedReturns(self._df, self._col_name) # Run through iterations and collect distribution self._sample_means = [] for i in range(0, self._num_iterations, 1): avg_val = self.run_monte_carlo_round(detrended_returns) self._sample_means.append(avg_val) # Calculate and return the p-value for the sample mean distribution calculated above return 
GetPVal(self._sample_means, rule_percent_return) # -------------------------------------------------------------------------------------------------------------------- # Test functions def test_monte_carlo_round(): rules = [1, 1, -1, -1, -1] data = [2, 3, 4, 3, 2] mc = MonteCarloBootstrap() mc._rules = rules mean = mc.run_monte_carlo_round(data) print "mean result: ", mean pass def test_monte_carlo_prediction(): rules = [1, 1, -1, -1, -1] data = [2, 3, 4, 3, 2] mc = MonteCarloBootstrap() mc._rules = rules mean = mc.run_monte_carlo_round(data) print "mean result: ", mean pass if __name__ == "__main__": # Functions to run if this file is executed print "Run default function for ", __file__ #test_monte_carlo_round() test_monte_carlo_prediction()
/TTEBootstrapTests/WhiteBootstrap.py
# -------------------------------------------------------------------------------------------------------------------- # # Patrick Neary # Date: 11/12/2016 # # Fin 5350 / Dr. Tyler J. Brough # Trade Testing Engine: # # kWhiteRealityCheck.py # # This file is an implementation of White's Reality Check for evaluating the significance of a trading rule's # predictive power. # # -------------------------------------------------------------------------------------------------------------------- # Import standard packages import random from matplotlib import pyplot as plt from BootstrapABC import BootstrapABC # Import my classes from BootstrapCalcTools import GetDailyReturns, GetMeanDailyReturn, GetPVal # Global values for selecting different options # -------------------------------------------------------------------------------------------------------------------- class WhiteBootstrap(BootstrapABC): def __init__(self): self._sample_means = [] self._df = None self._detrended_data = None self._col_name = None self._num_iterations = None pass def init_test(self, df, col_name, num_iterations=5000): """ init_test initializes the White Reality Check Bootstrap test :param df: dataframe containing data to bootstrap :param col_name: name of colume in data frame containing data :param num_iterations: number of iterations to build bootstrap sampling distribution :return: none """ self._df = df self._detrended_data = None self._col_name = col_name self._num_iterations = num_iterations datalen = len(self._df.index) # Detrend the data meanDailyReturn, dailyreturns = GetMeanDailyReturn(self._df, self._col_name) dailyreturns = dailyreturns.apply(lambda x: x-meanDailyReturn) # Iterate over the daily returns and build a distribution of returns meanList = [] for meanCount in xrange(0, self._num_iterations): sampleSum = 0 for randomReturn in xrange(0, datalen): index = random.randint(0, datalen-1) sampleSum += dailyreturns.iat[index, 0] #sampleMean = sampleSum #/ datalen 
#meanList.append(sampleMean) meanList.append(sampleSum) #histogram, edges = np.histogram(meanList, bins=10) self._sample_means = meanList pass def plot_histogram(self, bins=20): if len(self._sample_means) > 0: plt.hist(self._sample_means, bins=bins) plt.grid(True) plt.show() return def get_histogram_data(self): return self._sample_means def has_predictive_power(self, rule_percent_return): return GetPVal(self._sample_means, rule_percent_return) # -------------------------------------------------------------------------------------------------------------------- # Test functions if __name__ == "__main__": # Functions to run if this file is executed print "Run default function for ", __file__
/TTEExampleCode.py
# -------------------------------------------------------------------------------------------------------------------- #
# Patrick Neary
# Date: 9/22/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# TestFile.py
#
# This file shows how to use the TTE package.
#
# NOTE(review): this script is Python 2 (print statements).
# -------------------------------------------------------------------------------------------------------------------- #

# Import standard packages

# Import my classes
from pytte.tte import TTE


def TestWhiteRealityCheck(tte, df):
    """Open/close one short trade, then evaluate it with White's Reality Check."""
    #tte.open_trade(4, tte.LONG)
    #tte.close_trade(11)
    tte.open_trade(10, tte.SHORT)
    tte.close_trade(13)
    tte.select_bootstrap(tte.BOOTSTRAP_WHITE)
    pval = tte.get_pvalue(iterations=5000)
    print "pval:", pval
    tte.plot_pdf()
    print tte.get_trade_stats()
    tte.plot_trades_equity()
    pass


def TestMonteCarloBootstrap(tte, df):
    """Same trade as above, evaluated with the Monte Carlo bootstrap."""
    #tte.open_trade(4, tte.LONG)
    #tte.close_trade(11)
    tte.open_trade(10, tte.SHORT)
    tte.close_trade(13)
    tte.select_bootstrap(tte.BOOTSTRAP_MONTE_CARLO)
    pval = tte.get_pvalue(iterations=5000)
    print "pval:", pval
    tte.plot_pdf()
    print tte.get_trade_stats()
    tte.plot_trades_equity()
    pass


def TestTTEBootstrap(tte, df):
    """Same trade as above, evaluated with the TTE bootstrap."""
    #tte.open_trade(4, tte.LONG)
    #tte.close_trade(11)
    tte.open_trade(10, tte.SHORT)
    tte.close_trade(13)
    tte.select_bootstrap(tte.BOOTSTRAP_TTE)
    pval = tte.get_pvalue(iterations=5000)
    print "pval:", pval, "\n"
    tte.plot_pdf()
    tte.plot_trades_equity()
    tte.print_trade_history()
    tte.print_trade_stats()
    pass


def CompareBootstrapOutputs(tte, df):
    """Run one trade through the bootstraps and print their p-values (only White currently enabled)."""
    #tte.open_trade(4, tte.LONG)
    #tte.close_trade(11)
    tte.open_trade(10, tte.SHORT)
    tte.close_trade(13)
    # Get and display TTE Bootstrap pvalue
    #tte.select_bootstrap(tte.BOOTSTRAP_TTE)
    #pval = tte.get_pvalue(iterations=5000)
    #print "TTE pval:", pval
    # Get and display Monte Carlo Bootstrap pvalue
    #tte.select_bootstrap(tte.BOOTSTRAP_MONTE_CARLO)
    #pval = tte.get_pvalue(iterations=5000)
    #print "Monte Carlo pval:", pval
    # Get and display White Bootstrap pvalue
    tte.select_bootstrap(tte.BOOTSTRAP_WHITE)
    pval = tte.get_pvalue(iterations=5000)
    print "White pval:", pval
    pass


if __name__ == "__main__":
    # Functions to run if this file is executed
    print "Run default function for ", __file__
    equity = "SPY"
    startDate = '2016-08-20'
    endDate = '2016-09-16'
    tte = TTE()
    df = tte.get_hist_data(equity, startDate, endDate)
    #TestWhiteRealityCheck(tte, df)
    #TestMonteCarloBootstrap(tte, df)
    #TestTTEBootstrap(tte, df)
    CompareBootstrapOutputs(tte, df)
/TTEPaperEx1.py
# -------------------------------------------------------------------------------------------------------------------- # # Patrick Neary # Date: 12/14/2016 # # Fin 5350 / Dr. Tyler J. Brough # Trade Testing Engine: # # TTEPaperEx1.py # # This file shows illustrates an example of trading rules that are active during a trend that, while provfitable, # fails to reject the null hypothesis. # # -------------------------------------------------------------------------------------------------------------------- # Import standard packages # Import my classes from pytte.tte import TTE def TestWhiteRealityCheck(tte, df): print "White Bootstrap" tte.select_bootstrap(tte.BOOTSTRAP_WHITE) pval = tte.get_pvalue(iterations=5000) print "pval:", pval tte.plot_pdf() print tte.get_trade_stats() tte.plot_trades_equity() pass def TestMonteCarloBootstrap(tte, df): print "Monte Carlo Bootstrap" tte.select_bootstrap(tte.BOOTSTRAP_MONTE_CARLO) pval = tte.get_pvalue(iterations=5000) print "pval:", pval tte.plot_pdf() print tte.get_trade_stats() #tte.plot_trades_equity() tte.print_trade_history() pass def TestTTEBootstrap(tte, df): print "TTE Bootstrap" tte.select_bootstrap(tte.BOOTSTRAP_TTE) pval = tte.get_pvalue(iterations=5000) print "pval:", pval, "\n" tte.plot_pdf() tte.plot_trades_equity() tte.print_trade_history() tte.print_trade_stats() pass if __name__ == "__main__": # Functions to run if this file is executed print "Run default function for ", __file__ equity = "DIA" startDate = '2016-02-12' endDate = '2016-04-20' tte = TTE() df = tte.get_hist_data(equity, startDate, endDate) # Trade 1 tte.open_trade(0, tte.LONG) tte.close_trade(5) # Trade 2 tte.open_trade(10, tte.LONG) tte.close_trade(15) # Trade 3 tte.open_trade(10, tte.LONG) tte.close_trade(20) # Trade 4 #tte.open_trade(20, tte.LONG) #tte.close_trade(25) # Trade 5 #tte.open_trade(30, tte.LONG) #tte.close_trade(35) # Trade 6 #tte.open_trade(40, tte.LONG) #tte.close_trade(45) #TestWhiteRealityCheck(tte, df) 
#TestMonteCarloBootstrap(tte, df) TestTTEBootstrap(tte, df)
/TTEPaperEx2.py
# -------------------------------------------------------------------------------------------------------------------- # # Patrick Neary # Date: 12/14/2016 # # Fin 5350 / Dr. Tyler J. Brough # Trade Testing Engine: # # TTEPaperEx2.py # # This file illustrates an example of trading rules that are engaged in a varied environment that is profitable # and rejects the null hypothesis. # # -------------------------------------------------------------------------------------------------------------------- # Import standard packages # Import my classes from pytte.tte import TTE def PrintResults(tte, title=None): pval = tte.get_pvalue(iterations=5000) print "pval:", pval, "\n" tte.print_trade_history() tte.print_trade_stats() tte.plot_all(title) pass def TestWhiteRealityCheck(tte): print "White Bootstrap" tte.select_bootstrap(tte.BOOTSTRAP_WHITE) PrintResults(tte, "White Bootstrap") pass def TestMonteCarloBootstrap(tte): print "Monte Carlo Bootstrap" tte.select_bootstrap(tte.BOOTSTRAP_MONTE_CARLO) PrintResults(tte, "Monte Carlo Bootstrap") pass def TestTTEBootstrap(tte): print "TTE Bootstrap" tte.select_bootstrap(tte.BOOTSTRAP_TTE) PrintResults(tte, "TTE Bootstrap") pass if __name__ == "__main__": # Functions to run if this file is executed print "Run default function for ", __file__ equity = "DIA" startDate = '2016-01-04' endDate = '2016-03-20' tte = TTE() df = tte.get_hist_data(equity, startDate, endDate) print len(df) # Trade 1 tte.open_trade(0, tte.SHORT) tte.close_trade(5) # Trade 2 tte.open_trade(8, tte.SHORT) tte.close_trade(12) # Trade 3 #tte.open_trade(15, tte.LONG) #tte.close_trade(20) # Trade 4 tte.open_trade(29, tte.LONG) tte.close_trade(34) # Trade 5 # tte.open_trade(39, tte.LONG) tte.close_trade(46) # Trade 6 tte.open_trade(47, tte.LONG) tte.close_trade(50) #TestWhiteRealityCheck(tte) #TestMonteCarloBootstrap(tte) TestTTEBootstrap(tte)
/TradeTracking/TradeDetails.py
# --------------------------------------------------------------------------------------------------------------------
# Patrick Neary
# CS 6110
# Project
# 10/6/2016
#
# TradeDetails.py
#
# Record of a single trade: open/close prices, direction, timestamps, and the
# helpers to open/close it and compute its P/L and duration.
# NOTE: this module is Python 2 (print statements, `04` day literals).
# --------------------------------------------------------------------------------------------------------------------
import datetime
import math


class TradeDetails:
    """Holds the details of one trade and computes its P/L and duration."""

    # Trade-direction codes; remappable via RedefineDirection().
    CASH = 0
    LONG = 1
    SHORT = -1

    def __init__(self):
        self.openPrice = 0.0
        self.closePrice = 0.0
        self.spread = 0.0              # transaction cost, charged once per trade
        self.tradeDirection = self.CASH
        self.equityName = ""
        self.openTimeStamp = None      # datetime set by OpenTrade
        self.closeTimeStamp = None     # datetime set by CloseTrade
        self.duration = None           # timedelta set by GetTradeDuration
        self.currPL = 0.0              # P/L of this trade only
        self.stopLoss = None           # declared but unused in this file
        self.profitTarget = None       # declared but unused in this file
        self.totalPL = 0.0             # running account P/L, set via SetTotalPL
        self.ID = None                 # optional ID of the entity making the trade
        return

    def __str__(self):
        # One-line CSV-ish summary of the trade.
        mystr = "%s, %s, %s, %s, %s, %s, %s, %s, %s" % (self.equityName, self.openTimeStamp, self.closeTimeStamp,
                                                        self.duration, self.openPrice, self.closePrice,
                                                        self.currPL, self.totalPL, self.ID)
        return mystr

    def OpenTrade(self, equity, openprice, spread, direction, timestamp, id=None):
        # timestamp - needs to be a string in format of "year-month-day" or in datetime format.
        if isinstance(timestamp, str) == True:
            timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%d")
        # Check to make sure timestamp is a date/time format
        if isinstance(timestamp, datetime.datetime) == False:
            print "Timestamp needs to be in datetime format"
            return
        self.openPrice = openprice
        self.equityName = equity
        self.spread = spread
        self.tradeDirection = direction
        self.openTimeStamp = timestamp
        self.ID = id  # ID of entity making the trade
        return

    def CloseTrade(self, closeprice, timestamp):
        # timestamp - needs to be a string in format of "year-month-day" or in datetime format.
        if isinstance(timestamp, str) == True:
            timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%d")
        # Check to make sure timestamp is a date/time format
        if isinstance(timestamp, datetime.datetime) == False:
            print "Timestamp needs to be in datetime format"
            return
        # Close the trade: record price/time and refresh P/L and duration.
        self.closePrice = closeprice
        self.closeTimeStamp = timestamp
        #self.tradeDirection = self.CASH
        self.GetCurrentPL(closeprice)
        self.GetTradeDuration()
        #self.ID = None
        return

    def GetCurrentPL(self, currprice):
        # Calculate the change in price from open to now. This includes the cost of the spread.
        # NOTE(review): comparisons use `is` against small ints, which relies on
        # CPython small-integer caching; `==` would be safer. Confirm before changing.
        if self.tradeDirection is self.CASH:
            self.currPL = 0.0
        elif self.tradeDirection is self.SHORT:
            self.currPL = float(self.openPrice) - float(currprice) - float(self.spread)
        else:
            self.currPL = float(currprice) - float(self.openPrice) - float(self.spread)
        #print "GetCurrentPL: ", self.currPL, self.tradeDirection, self.spread
        return self.currPL

    def GetTradePercentPL(self):
        # Log-return of the closed trade, signed by direction.
        # NOTE(review): uses base-10 logs while TradeHistory documents this value
        # as a natural-log ("ln") return -- confirm the intended base.
        if self.tradeDirection is self.CASH:
            totalPercentReturn = 0.0
        elif self.tradeDirection is self.SHORT:
            totalPercentReturn = math.log10(float(self.openPrice)) - math.log10(float(self.closePrice))
        else:
            totalPercentReturn = math.log10(float(self.closePrice)) - math.log10(float(self.openPrice))
        return totalPercentReturn

    def GetTradeDuration(self):
        # Timedelta between close and open; assumes both timestamps are set.
        duration = self.closeTimeStamp - self.openTimeStamp
        self.duration = duration
        return self.duration

    def RedefineDirection(self, cash, long, short):
        # Allow the caller to remap the direction codes (e.g. to match another
        # system's conventions). Note `long` shadows the Python 2 builtin.
        self.CASH = cash
        self.LONG = long
        self.SHORT = short
        return

    def SetTotalPL(self, totalPL):
        # Record the account-level running P/L at the time this trade closed.
        self.totalPL = totalPL
        return

    def GetCurrentTradeID(self):
        return self.ID


# --------------------------------------------------------------------------------------------------------------------
#
# --------------------------------------------------------------------------------------------------------------------
def TestTradeDetails():
    # Smoke test: open and close one trade, then print it.
    openTS = datetime.datetime(2016, 04, 18)
    closeTS = datetime.datetime(2016, 04, 19)
    openPrice = 78.8
    closePrice = 78.2
    spread = 0.032
    td = TradeDetails()
    td.OpenTrade("AUDJPY", openPrice, spread, 1, openTS)
    td.CloseTrade(closePrice, closeTS)
    print td
    return


# --------------------------------------------------------------------------------------------------------------------
# Default function when the file is run
# --------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Functions to run if this file is executed
    print "Run default function for ", __file__
    TestTradeDetails()
/TradeTracking/TradeHistory.py
# --------------------------------------------------------------------------------------------------------------------
# Patrick Neary
# Fin5350
# Project
# 10/6/2016
#
# TradeHistory.py
#
# Tracks a sequence of trades (via TradeDetails), accumulating P/L, win/loss
# statistics and, optionally, a per-bar position column in a tick dataframe.
# NOTE: this module is Python 2 (print statements, `04` literals).
# --------------------------------------------------------------------------------------------------------------------
import math
import datetime
import numpy as np

from TradeDetails import TradeDetails


class TradeTracking:
    """Accumulates trades, running P/L, and win/loss statistics."""

    def __init__(self, trackHistory=True, trackDailyPositions=False):
        self.totalPL = 0.0                 # cumulative price P/L over recorded trades
        self.totalPercentReturn = 0.0      # cumulative log-return over all trades
        self.tradeHistory = []             # list of closed TradeDetails (filtered; see CloseTrade)
        self.currTrade = TradeDetails()    # the currently open trade
        self.trackHistory = trackHistory   # whether to append closed trades to tradeHistory
        self.totalWins = 0
        self.totalLosses = 0
        self.longWins = 0
        self.shortWins = 0
        self.longLosses = 0
        self.shortLosses = 0
        self.tickData = None               # dataframe set by InitTickData
        self.trackDailyPositions = trackDailyPositions
        self.ID = None                     # ID of entity holding the current trade
        self.isTradeOpen = False
        self.currTradeDirection = 0
        self.currPrice = 0.0
        # Direction codes used by this tracker.
        self.CASH = 0
        self.LONG = 1
        self.SHORT = -1
        self.firsttimestamp = None         # open time of the first trade seen
        self.lasttimestamp = None          # close time of the last trade seen
        self.openPrice = 0.0
        self.cnt = 0                       # counter used to thin out losing trades (see CloseTrade)
        return

    def __str__(self):
        # All recorded trades, one per line.
        tradehistorystr = ""
        for trade in self.tradeHistory:
            tradehistorystr += trade.__str__() + "\n"
        return tradehistorystr

    def InitTickData(self, tickData):
        # tickData - data frame containing time stamped tick information. A column will be added to this data to
        # track every time period's position. 0 - No trade, 1 - Long position, -1 - Short position.
        self.tickData = tickData
        # Add column to track position for every time period and make sure entries are 0 for 'no trade'
        self.tickData['Position'] = np.zeros((len(tickData), 1))
        pass

    def UpdateTradePositions(self):
        # Find and update the positions between the open and close dates in the dataframe. This function is based
        # off of values in self.currTrade. This shouldn't be called until after openTimeStamp, closeTimeStamp, and
        # tradeDirection have been set.. or after CloseTrade has been called.
        # Only run through this if we're tracking daily positions
        if self.trackDailyPositions == False:
            return
        # Iterate through the array looking for relevant time stamps.
        # NOTE(review): uses the deprecated pandas `.ix` and `.set_value` APIs.
        index = 0
        for idx in self.tickData.iterrows():
            #print idx
            currtimestamp = datetime.datetime.strptime(self.tickData.ix[index]['Date'], "%Y-%m-%d")
            if currtimestamp >= self.currTrade.openTimeStamp and currtimestamp <= self.currTrade.closeTimeStamp:
                self.tickData.set_value(index, 'Position', self.currTrade.tradeDirection)
            index += 1
        pass

    def OpenTrade(self, equity, openprice, spread, direction, timestamp, id=None):
        # Start a new TradeDetails record; remembers the very first open time.
        if self.firsttimestamp == None:
            self.firsttimestamp = timestamp
        self.currTrade = TradeDetails()
        self.currTrade.OpenTrade(equity, openprice, spread, direction, timestamp, id)
        self.ID = id
        self.isTradeOpen = True
        self.currTradeDirection = direction
        self.openPrice = openprice
        #print "OpenTrade", equity, openprice, spread, direction, timestamp, id
        return

    def UpdateStats(self, closeprice):
        # Tally win/loss counters by direction.
        # NOTE(review): a P/L of exactly 0 is counted as a loss here.
        tradePL = self.currTrade.GetCurrentPL(closeprice)
        if tradePL > 0:
            if self.currTradeDirection == self.LONG:
                self.longWins += 1
            else:
                self.shortWins += 1
            self.totalWins += 1
        else:
            if self.currTradeDirection == self.LONG:
                self.longLosses += 1
            else:
                self.shortLosses += 1
            self.totalLosses += 1
        pass

    def CloseTrade(self, closeprice, timestamp, direction):
        # Close the current trade and, selectively, record it.
        # NOTE(review): the comment below says "drop half of the losing trades"
        # but the `cnt` logic records only one of every three losing trades
        # (recorded when cnt == 0; cnt cycles 0..3). Winning trades are always
        # recorded. Confirm which behavior is intended.
        self.lasttimestamp = timestamp
        # Close the trade
        self.currTrade.CloseTrade(closeprice, timestamp)
        tradePL = self.currTrade.GetCurrentPL(closeprice)
        self.totalPercentReturn += self.currTrade.GetTradePercentPL()
        if tradePL > 0 or self.cnt == 0:
            # add trade to the history if enabled
            if self.trackHistory == True:
                # Drop half of the losing trades
                self.tradeHistory.append(self.currTrade)
            # Add trade results to total PL
            self.totalPL += tradePL
            self.currTrade.SetTotalPL(self.totalPL)
            # Update stats
            self.UpdateStats(closeprice)
            # Update trade positions for this trade if it's being tracked
            self.UpdateTradePositions()
        if tradePL < 0:
            if self.cnt < 3:
                self.cnt += 1
            if self.cnt >= 3:
                self.cnt = 0
        self.ID = None
        self.isTradeOpen = False
        self.currTradeDirection = direction
        return

    def GetTradeCurrPL(self, currPrice):
        # Mark-to-market P/L of the currently open trade.
        return self.currTrade.GetCurrentPL(currPrice)

    def UpdateCurrPrice(self, currPrice):
        self.currPrice = currPrice
        pass

    def GetTimeStepPL(self, nextPrice):
        # This gets the difference between the updated price and the next price. Order of subtraction is based on
        # the direction of the trade.
        if self.currTradeDirection == self.LONG:
            return nextPrice - self.currPrice
        elif self.currTradeDirection == self.SHORT:
            return self.currPrice - nextPrice
        else:
            return 0.0

    def GetTradeCurrDuration(self):
        return self.currTrade.GetTradeDuration()

    def GetTotalPL(self):
        # This returns the cumulative PL prior to current trade (if any)
        return self.totalPL

    def GetPercentReturn(self):
        # This calculates the percent return using ln(r1) - ln(r2) where r1 and r2 are opening/closing prices
        # NOTE(review): TradeDetails.GetTradePercentPL actually uses log10, not ln.
        return self.totalPercentReturn

    def GetTradeStatsStr(self):
        # Build a human-readable, tab-aligned summary of the win/loss counters.
        # Percentages are relative to the relevant trade count; max(..., 1)
        # guards against division by zero when no trades were made.
        tradestatsstr = ""
        totalTrades = max((self.totalWins + self.totalLosses), 1)
        tradestatsstr += "Trading Stats:\n"
        tradestatsstr += "Total trades:\t %d\n" % totalTrades
        tradestatsstr += "Total Wins:\t\t %d, \t%0.2f%%\n" %(self.totalWins, (float(self.totalWins)/totalTrades)*100)
        tradestatsstr += "Total Losses:\t %d, \t%0.2f%%\n" %(self.totalLosses, (float(self.totalLosses)/totalTrades)*100)
        longTrades = max((self.longWins + self.longLosses), 1)
        shortTrades = max((self.shortWins + self.shortLosses), 1)
        tradestatsstr += "Long wins:\t\t %d, \t%0.2f%%\n" %(self.longWins, (float(self.longWins)/longTrades)*100)
        tradestatsstr += "Long losses:\t %d, \t%0.2f%%\n" %(self.longLosses, (float(self.longLosses)/longTrades)*100)
        tradestatsstr += "Short wins:\t\t %d, \t%0.2f%%\n" %(self.shortWins, (float(self.shortWins)/shortTrades)*100)
        tradestatsstr += "Short losses:\t %d, \t%0.2f%%\n" %(self.shortLosses, (float(self.shortLosses)/shortTrades)*100)
        tradestatsstr += "Total P/L:\t\t %0.2f\n" % self.totalPL
        # NOTE(review): "P\L" below contains a literal backslash; likely meant "P/L".
        tradestatsstr += "Percent P\L:\t %0.2f\n" % self.totalPercentReturn
        tradestatsstr += "First timestamp: %s\n" % self.firsttimestamp
        tradestatsstr += "Last timestamp:\t %s\n" % self.lasttimestamp
        return tradestatsstr

    def PrintHistory(self):
        # Print every recorded trade and also return them concatenated as a string.
        tradehistorystr = ""
        for trade in self.tradeHistory:
            tradehistorystr += trade.__str__()
            print trade
        return tradehistorystr

    def GetHistory(self):
        # Return list of TradeDetails
        return self.tradeHistory

    def getCurrID(self):
        # If application is interested in the ID for the current trade then it will be available (if set).
        return self.ID

    def GetIsTradeOpen(self):
        return self.isTradeOpen

    def GetCurrTradeDirection(self):
        return self.currTradeDirection


# --------------------------------------------------------------------------------------------------------------------
#
# --------------------------------------------------------------------------------------------------------------------
def ExecuteTestTrades():
    # Smoke test: run two trades through the tracker and print the history.
    # NOTE(review): SHORT = 2 here disagrees with the class's SHORT = -1, and
    # both tt.CloseTrade(...) calls below pass only 2 arguments while
    # CloseTrade(closeprice, timestamp, direction) requires 3 -- this raises
    # a TypeError at runtime. Confirm intended fix (probably pass CASH).
    CASH = 0
    LONG = 1
    SHORT = 2
    openTS = datetime.datetime(2016, 04, 18)
    closeTS = datetime.datetime(2016, 04, 19)
    openPrice = 78.8
    closePrice = 78.2
    spread = 0.032
    tt = TradeTracking()
    tt.OpenTrade("AUDJPY", openPrice, spread, LONG, openTS)
    tt.CloseTrade(closePrice, closeTS)
    print tt
    openTS = datetime.datetime(2016, 04, 20)
    closeTS = datetime.datetime(2016, 04, 22)
    openPrice = 79.0
    closePrice = 79.8
    spread = 0.032
    tt.OpenTrade("AUDJPY", openPrice, spread, LONG, openTS)
    tt.CloseTrade(closePrice, closeTS)
    print ""
    print tt
    return


# --------------------------------------------------------------------------------------------------------------------
# Default function when the file is run
# --------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Functions to run if this file is executed
    print "Run default function for ", __file__
    ExecuteTestTrades()
/__init__.py
from pytte import tte
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Tadaboody/good_smell
refs/heads/master
{"/good_smell/__init__.py": ["/good_smell/lint_smell.py", "/good_smell/flake8_ext.py", "/good_smell/ast_smell.py", "/good_smell/main.py", "/good_smell/smell_warning.py"], "/good_smell/ast_smell.py": ["/good_smell/lint_smell.py", "/good_smell/smell_warning.py"], "/good_smell/main.py": ["/good_smell/lint_smell.py", "/good_smell/smell_warning.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/filter.py", "/good_smell/smells/yield_from.py"], "/tests/test_no_transform.py": ["/good_smell/smells/nested_for.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/smells/filter.py": ["/good_smell/ast_smell.py"], "/good_smell/smells/join_literal.py": ["/good_smell/ast_smell.py"], "/good_smell/smells/nested_for.py": ["/good_smell/ast_smell.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/ast_smell.py"], "/good_smell/smells/yield_from.py": ["/good_smell/ast_smell.py"], "/tests/test_collection.py": ["/good_smell/main.py"], "/tests/test_enumerate_fix.py": ["/good_smell/main.py"], "/good_smell/flake8_ext.py": ["/good_smell/smell_warning.py"], "/good_smell/lint_smell.py": ["/good_smell/smell_warning.py"]}
└── ├── docs │ └── generate_smell_doc.py ├── good_smell │ ├── __init__.py │ ├── ast_smell.py │ ├── flake8_ext.py │ ├── lint_smell.py │ ├── main.py │ ├── smell_warning.py │ └── smells │ ├── __init__.py │ ├── filter.py │ ├── join_literal.py │ ├── nested_for.py │ ├── range_len_fix.py │ └── yield_from.py └── tests ├── examples │ ├── example.py │ ├── filter.py │ ├── join_literal.py │ ├── nested_for.py │ ├── range_len.py │ └── yield_from.py ├── test_collection.py ├── test_enumerate_fix.py └── test_no_transform.py
/docs/generate_smell_doc.py
from tests.test_collection import collect_tests, test_case_files


def generate_smell_docs():
    """Print a markdown section for the first example test of every case file."""
    for case_file in test_case_files:
        # Materialize the collected tests and keep only the first example,
        # exactly as the docs have always shown one example per file.
        examples = list(collect_tests(case_file))
        desc, symbols, before, after = examples[0]
        symbol = list(symbols)[0]
        print(
            f"""### {desc} ({symbol})
```py
{before}```
Will be fixed to
```py
{after}```"""
        )


if __name__ == "__main__":
    generate_smell_docs()
/good_smell/__init__.py
# flake8:noqa try: from importlib import metadata except ImportError: # Running on pre-3.8 Python; use importlib-metadata package import importlib_metadata as metadata __version__ = metadata.version("good-smell") from .smell_warning import SmellWarning from .lint_smell import LintSmell from .ast_smell import AstSmell, LoggingTransformer from .smells import implemented_smells from .main import fix_smell, print_fixed_smell, main, smell_warnings from . import smells # Allow importing good_smell.smells from .flake8_ext import LintingFlake8
/good_smell/ast_smell.py
import abc
import ast
from typing import List, Optional, Type, TypeVar

import astor

from good_smell import LintSmell, SmellWarning


class LoggingTransformer(ast.NodeTransformer, abc.ABC):
    """A subclass of transformer that logs the nodes it transforms."""

    def __init__(self, transform):
        # Nodes that matched is_smelly, in visit order.
        self.transformed_nodes = list()
        # Whether visit() should actually rewrite matching nodes or only log
        # them. (Bug fix: this attribute was previously misspelled
        # ``transofrm``; no other module referenced the misspelled name.)
        self.transform = transform

    @abc.abstractmethod
    def is_smelly(self, node: ast.AST) -> bool:
        """Checks if the given `node` should be transformed"""

    def visit(self, node: ast.AST):
        """Log every smelly node, and transform it only in transform mode."""
        if not self.is_smelly(node):
            return self.generic_visit(node)
        self.transformed_nodes.append(node)
        if self.transform:
            # Dispatch to the concrete visit_<NodeType> rewriter.
            return super().visit(node)
        return self.generic_visit(node)


T = TypeVar("T")


def unwrap(x: Optional[T]) -> T:
    """Return `x`, raising ValueError if it is None (Optional narrowing)."""
    if x is None:
        # Bug fix: corrected typo in the error message ("Unrwapped").
        raise ValueError("Unwrapped None")
    return x


class AstSmell(LintSmell):
    """A LintSmell implemented as an AST transformation."""

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""
        transformer = self.transformer_class(self.transform)
        transformer.visit(unwrap(self.tree))
        node: ast.stmt
        return [
            SmellWarning(
                msg=self.warning_message,
                row=node.lineno,
                col=node.col_offset,
                path=unwrap(self.path),
                symbol=self.symbol,
            )
            for node in transformer.transformed_nodes
        ]

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The class for the transformer used to create the fixed source"""
/good_smell/flake8_ext.py
import ast
from typing import Generator, Tuple

from good_smell import SmellWarning, implemented_smells, __version__


class LintingFlake8:
    """Entry point good smell to be used as a flake8 linting plugin"""

    name = "good-smell"
    version = __version__

    def __init__(self, tree: ast.AST, filename: str):
        """http://flake8.pycqa.org/en/latest/plugin-development/plugin-parameters.html"""
        self.tree = tree
        self.filename = filename

    def run(self) -> Generator[Tuple[int, int, str, str], None, None]:
        """Yield flake8 result tuples for every smell found in the tree."""
        for num, smell_class in enumerate(implemented_smells):
            # Error codes are assigned positionally: SML000, SML001, ...
            code = "SML" + str(num).zfill(3)
            checker = smell_class(transform=False, tree=self.tree, path=self.filename)
            warning: SmellWarning
            for warning in checker.check_for_smell():
                yield warning.row, warning.col, f"{code} {warning.msg}", "GoodSmell"
/good_smell/lint_smell.py
import abc
import ast
import os
from typing import List, Optional

from good_smell import SmellWarning


class LintSmell(abc.ABC):
    """Abstract Base class to represent the sniffing instructions for the linter"""

    def __init__(
        self,
        transform: bool,
        path: Optional[str] = None,
        tree: Optional[ast.AST] = None,
    ):
        self.tree = tree
        self.path = path
        self.transform = transform

    @classmethod
    def from_source(
        cls,
        source_code: str,
        transform: bool = True,
        start_line: Optional[int] = 0,
        end_line: Optional[int] = None,
        path: Optional[str] = None,
    ) -> "LintSmell":
        """Build an instance from raw source, parsed over lines
        [start_line, end_line) (0-based; end_line=None means end of file).
        """
        # Bug fix: removed the dead no-op assignment ``start_line = start_line``.
        # NOTE(review): ``end_line or ...`` treats an explicit 0 like None --
        # confirm that is intended.
        end_line = end_line or len(source_code.splitlines())
        source_code = os.linesep.join(source_code.splitlines()[start_line:end_line])
        return cls(transform=transform, path=path, tree=ast.parse(source_code))

    @abc.abstractmethod
    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""

    @abc.abstractmethod
    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""

    @property
    @abc.abstractmethod
    def symbol(self) -> str:
        """The symbolic name for the smell"""

    @property
    @abc.abstractmethod
    def warning_message(self) -> str:
        """The warning message displayed for the smell"""
        # (Bug fix: docstring was copy-pasted from `symbol`.)
/good_smell/main.py
from pathlib import Path
from typing import Iterable, Optional, Type

from fire import Fire

from good_smell import LintSmell, SmellWarning, implemented_smells


def print_smell_warnings(path: str):
    """Prints any warning messages about smells found in the file at `path`."""
    print(
        "\n".join(
            warning.warning_string()
            for warning in smell_warnings(Path(path).read_text(), path)
        )
    )


def smell_warnings(source: str, path: str = "") -> Iterable[SmellWarning]:
    """Yield every warning each implemented smell finds in `source`."""
    for smell in implemented_smells:
        yield from smell.from_source(
            source_code=source, path=str(path), transform=False
        ).check_for_smell()


# Bug fix (typing): `end_line`/`path` defaulted to None but were annotated as
# plain `int`/`str`; they are now correctly Optional. Runtime defaults unchanged.
def print_fixed_smell(path: str, starting_line: int = 0, end_line: Optional[int] = None):
    """Prints a fixed version of the file at `path`."""
    pathlib_path = Path(path)
    source = pathlib_path.read_text()
    print(fix_smell(source, starting_line, end_line))


def fix_smell(
    source: str,
    starting_line: int = 0,
    end_line: Optional[int] = None,
    path: Optional[str] = None,
) -> str:
    """Returns a fixed version of `source`, applying every implemented smell
    fixer in sequence over lines [starting_line, end_line)."""
    smell: Type[LintSmell]
    for smell in implemented_smells:
        source = smell.from_source(
            source_code=source,
            start_line=starting_line,
            end_line=end_line,
            path=path,
            transform=True,
        ).fix_smell()
    return source


def main():
    """Console entry point: expose the fixer as the ``fix`` subcommand."""
    Fire({"fix": print_fixed_smell})


if __name__ == "__main__":
    main()
/good_smell/smell_warning.py
from typing import NamedTuple

# Output templates matching the report formats of flake8 and pylint.
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
# Bug fix: this template previously used `{line}` and `{column}` placeholders,
# but SmellWarning's fields are named `row` and `col`, so the default
# warning_string() call raised KeyError. The placeholders now match the fields.
PYLINT_FORMAT = "{path}:{row}:{col}: {msg} ({symbol})"


def to_dict(namedtuple: NamedTuple) -> dict:
    """Return the namedtuple's fields as a ``{field_name: value}`` dict."""
    return dict(zip(namedtuple._fields, list(namedtuple)))


class SmellWarning(NamedTuple):
    """Class to represent a warning message about a smell"""

    row: int
    col: int
    path: str
    msg: str
    symbol: str

    def warning_string(self, formatter: str = PYLINT_FORMAT) -> str:
        """Render this warning with `formatter` (a str.format template whose
        placeholders are this tuple's field names)."""
        return formatter.format(**to_dict(self))
/good_smell/smells/__init__.py
from .filter import FilterIterator
from .join_literal import JoinLiteral
from .nested_for import NestedFor
from .range_len_fix import RangeLenSmell
from .yield_from import YieldFrom

# Registry of every smell the linter detects and fixes.
# NOTE: order is significant -- the flake8 plugin assigns SMLnnn error codes
# positionally from this tuple, so reordering changes published error codes.
implemented_smells = (RangeLenSmell, NestedFor, FilterIterator, YieldFrom, JoinLiteral)
/good_smell/smells/filter.py
from typing import TypeVar
import ast
from typing import cast

from good_smell import AstSmell, LoggingTransformer


class NameReplacer(ast.NodeTransformer):
    """Replaces every occurrence of one variable name with a new expression."""

    def __init__(self, old: ast.Name, new: ast.AST):
        self.old = old
        self.new = new

    def visit_Name(self, node: ast.Name) -> ast.AST:
        if node.id == self.old.id:
            return self.new
        return node


T = TypeVar("T", bound=ast.AST)


def replace_name_with_node(node: T, old_val: ast.Name, new_val: ast.AST) -> T:
    """Returns `node` with all occurences of `old_val` (a variable) replaced
    with `new_val` (an expression)"""
    return NameReplacer(old_val, new_val).visit(node)


class FilterTransformer(LoggingTransformer):
    """Bumps the for-body's ``if`` condition into the iterator as a
    generator-expression filter."""

    def visit_For(self, node: ast.For) -> ast.For:
        # is_smelly guarantees the body is exactly one ``if`` with no else.
        if_node: ast.If = node.body[0]
        filter_condition: ast.Expr = if_node.test
        if not isinstance(node.iter, ast.GeneratorExp):
            # Create a generator expression if it doesn't exist
            GEN_ELT_NAME = "x"
            gen_exp: ast.GeneratorExp = cast(
                ast.GeneratorExp,
                ast_node(f"({GEN_ELT_NAME} for {GEN_ELT_NAME} in seq)").value,
            )
            gen_target = ast_node(GEN_ELT_NAME).value
            iter_comprehension = gen_exp.generators[0]
            iter_comprehension.iter = replace_name_with_node(
                node.iter, node.target, gen_target
            )
        else:
            # Reuse the existing generator expression; the filter is expressed
            # in terms of its element expression.
            gen_exp = node.iter
            iter_comprehension = gen_exp.generators[0]
            gen_target = gen_exp.elt
        iter_comprehension.ifs.append(
            replace_name_with_node(filter_condition, node.target, gen_target)
        )
        node.iter = gen_exp
        node.body = if_node.body
        return node

    def is_smelly(self, node: ast.AST):
        """Check whether `node` is a for loop whose whole body is a single
        ``if`` with no ``else`` -- i.e. a filter that belongs in the iterator."""
        return (
            isinstance(node, ast.For)
            and len(node.body) == 1
            and isinstance(node.body[0], ast.If)
            # Bug fix: an if/elif/else body must not be rewritten --
            # visit_For keeps only the if-body and would silently drop
            # the else/elif branches.
            and not node.body[0].orelse
        )


class FilterIterator(AstSmell):
    """Checks for a for loop whose body is a single ``if`` and moves the
    condition into the iterator as a generator-expression filter.

    (Bug fix: the class docstring and warning message were copy-pasted from
    the nested-for smell and wrongly referred to itertools.product.)
    """

    @property
    def transformer_class(self):
        return FilterTransformer

    @property
    def warning_message(self):
        return "Consider moving the condition into the iterator"

    @property
    def symbol(self) -> str:
        return "filter-iterator"


def ast_node(expr: str) -> ast.AST:
    """Helper function to parse a string denoting an expression into an AST node"""
    # ast.parse returns "Module(body=[Node])"
    return ast.parse(expr).body[0]
/good_smell/smells/join_literal.py
import ast

from good_smell import AstSmell, LoggingTransformer

try:
    # ast.Str is deprecated in py3.8 and will be removed
    StrConst = (ast.Constant, ast.Str)
except AttributeError:
    StrConst = (ast.Constant,)


class JoinLiteral(AstSmell):
    """Checks if joining a literal of a sequence."""

    @property
    def transformer_class(self):
        return Transformer

    @property
    def warning_message(self):
        return (
            "Consider using str.format instead of joining a constant amount of strings."
        )

    @property
    def symbol(self):
        return "join-literal"


class Transformer(LoggingTransformer):
    """Checks for usages of str.join with a constant amount of arguments."""

    @staticmethod
    def normalize_constant(node) -> ast.Constant:
        """Compatibility wrapper for py3.8+, ast, ast.Str and ast.Num are replaced by
        ast.Constant.
        We don't type annotate `node` so it doesn't break on py3.10+ when these
        classes will be removed.
        """
        for attr in ["value", "s", "n"]:
            try:
                return ast.Constant(value=getattr(node, attr))
            except AttributeError:
                pass
        # Bug fix: corrected typo in the error message ("constat").
        raise ValueError("Not a constant.")

    def visit_Call(self, node: ast.Call) -> ast.Call:
        """Rewrite ``"sep".join([a, b])`` into ``"{sep-joined {}s}".format(a, b)``."""
        format_arguments = node.args[0].elts
        format_delimiter = self.normalize_constant(node.func.value).value
        # One "{}" per joined element, separated by the join delimiter.
        format_string = format_delimiter.join(["{}"] * len(format_arguments))
        new_call = ast.Call(
            func=ast.Attribute(
                value=ast.Constant(value=format_string), attr="format", ctx=ast.Load()
            ),
            args=format_arguments,
            keywords=[],
        )
        return ast.fix_missing_locations(new_call)

    @staticmethod
    def is_smelly(node: ast.AST):
        """Check whether `node` is a ``"literal".join([...])`` call on a list
        literal with no starred elements.
        (Bug fix: docstring was copy-pasted from the nested-for smell.)"""
        return (
            isinstance(node, ast.Call)
            and isinstance(node.func, ast.Attribute)
            and isinstance(node.func.value, StrConst)
            and node.func.attr == "join"
            and len(node.args) == 1
            and isinstance(node.args[0], ast.List)
            and not any(isinstance(el, ast.Starred) for el in node.args[0].elts)
        )
/good_smell/smells/nested_for.py
import ast
import typing

from good_smell import AstSmell, LoggingTransformer


class NameInNode(LoggingTransformer):
    """Non-transforming visitor that records occurrences of one variable name."""

    def __init__(self, name: ast.Name):
        self.name = name
        super().__init__(transform=False)

    def is_smelly(self, node: ast.AST) -> bool:
        return isinstance(node, ast.Name) and node.id == self.name.id


def name_in_node(node: ast.AST, name: ast.Name) -> bool:
    """Checks if the node `name` is in `node`"""
    checker = NameInNode(name)
    checker.visit(node)
    # LoggingTransformer records every match in transformed_nodes.
    return bool(checker.transformed_nodes)


class NestedFor(AstSmell):
    """Checks for adjacent nested fors and replaces them with a single loop
    over a generator of (outer, inner) pairs."""

    @property
    def transformer_class(self):
        return NestedForTransformer

    @property
    def warning_message(self):
        return "Consider using a nested comprehension instead of a nested for"

    @property
    def symbol(self):
        return "nested-for"


class NestedForTransformer(LoggingTransformer):
    """NodeTransformer that visits all the nested `for`s and replaces them with
    one loop whose iterator is a generator expression over both ranges."""

    def visit_For(self, node: ast.For) -> ast.For:
        # is_smelly guarantees the outer body is exactly the inner for.
        inner_for: ast.For = node.body[0]
        # New loop target: the (outer_target, inner_target) tuple.
        new_target = ast.Tuple(elts=[node.target, inner_for.target])

        def create_comprehension(for_node: ast.For) -> ast.comprehension:
            # One comprehension clause per original loop, no filters.
            return ast.comprehension(target=for_node.target, iter=for_node.iter, ifs=[])

        gen_exp = ast.GeneratorExp(
            elt=new_target,
            generators=[create_comprehension(node), create_comprehension(inner_for)],
        )
        new_for = ast.For(
            target=new_target, iter=gen_exp, body=inner_for.body, orelse=node.orelse
        )
        new_for = ast.fix_missing_locations(new_for)
        return new_for

    @staticmethod
    def is_smelly(node: ast.AST):
        """Check if the node is only a nested for"""
        return (
            isinstance(node, ast.For)
            and isinstance(node.body[0], ast.For)
            and len(node.body) == 1
            # Check there's no dependancy between nodes: the inner iterator
            # must not reference any of the outer loop's targets.
            and not any(
                name_in_node(node.body[0].iter, target)
                for target in for_target_names(node)
            )
        )


def ast_node(expr: str) -> ast.AST:
    """Helper function to parse a string denoting an expression into an AST node"""
    # ast.parse returns "Module(body=[Node])"
    # NOTE(review): unused within this module -- confirm before removing.
    return ast.parse(expr).body[0]


def for_target_names(node: ast.For) -> typing.List[ast.Name]:
    """Returns the names that are the targets of the for loop."""
    target = typing.cast(typing.Union[ast.Tuple, ast.Name], node.target)
    return target.elts if isinstance(target, ast.Tuple) else [target]
/good_smell/smells/range_len_fix.py
import ast
from good_smell import AstSmell, LoggingTransformer
from typing import Union, Container


class RangeLenSmell(AstSmell):
    """Detects ``for i in range(len(seq))`` and rewrites it to use enumerate."""

    @property
    def transformer_class(self):
        return EnumerateFixer

    @property
    def symbol(self):
        return "range-len"

    @property
    def warning_message(self) -> str:
        return "Instead of using a c-style for loop, try using enumerate!"


class AssignDeleter(ast.NodeTransformer):
    """Rewrites ``seq[i]`` accesses inside the loop body.

    Deletes ``x = seq[i]`` assignments (remembering ``x`` as the element
    target) and replaces the remaining ``seq[i]`` subscripts with that target.
    """

    def __init__(self, seq: ast.Name, target: ast.Name):
        self.id = target      # the loop index variable
        self.seq = seq        # the sequence node being indexed
        # ``None or X`` evaluates to X; default element name is "elm".
        self.elem_target = None or ast.Name(id="elm", ctx=ast.Store())
        self.uses_seq = False  # set True once any seq[index] access is seen

    def visit_Assign(self, node: ast.Assign):
        """Deletes a node if it assigning using the for target"""
        if self.accesses_seq(node.value):
            # Remember the assigned name so subscripts can be replaced with it.
            self.elem_target = node.targets[0]
            return None
        return self.generic_visit(node)

    @staticmethod
    def __get_slice_id(node: ast.Subscript) -> Container[str]:
        """Get slice identifier. Needed because in python3.9 ast.Subscript.slice
        became a ast.Name, instead of a ast.Index.
        NOTE(review): the Slice branch returns AST nodes (upper/lower) rather
        than identifier strings, and any other slice kind falls through
        returning None (making the caller's ``in`` test raise TypeError) --
        confirm intended behavior."""
        slice = node.slice
        if isinstance(slice, ast.Name):
            return [slice.id]
        if isinstance(slice, ast.Index):
            return [slice.value.id]
        if isinstance(slice, ast.Slice):
            return [slice.upper, slice.lower]

    def accesses_seq(self, node) -> bool:
        """Checks if the node acceses the sequence[target]"""
        if (
            isinstance(node, ast.Subscript)
            and self.id.id in self.__get_slice_id(node)
            and node.value.id == self.seq.id
        ):
            self.uses_seq = True
            return True

    def visit_Subscript(self, node: ast.Subscript):
        if self.accesses_seq(node):
            # Replace seq[i] with the remembered element name.
            return self.elem_target
        return self.generic_visit(node)


class EnumerateFixer(LoggingTransformer):
    """Transforms ``for i in range(len(seq))`` into
    ``for i, elm in enumerate(seq)``."""

    def visit_For(self, node: ast.For) -> Union[bool, ast.For]:
        enumerate_node = ast.Name(id="enumerate", ctx=ast.Load())
        # range(len(seq)): args[0] is the len() call, its args[0] is seq.
        node_iterable = node.iter.args[0].args[0]
        original_target = node.target
        deleter = AssignDeleter(target=original_target, seq=node_iterable)
        # Rewrite the body; if everything was deleted, keep a ``pass``.
        new_body = deleter.visit(node).body or [ast.Pass()]
        # If the body never indexed seq, bind the element to "_".
        elm_target = (
            deleter.elem_target
            if deleter.uses_seq
            else ast.Name(id="_", ctx=ast.Store())
        )
        # for (original_target, elm_target) in enumerate(node_iterable):
        new_node = ast.For(
            target=ast.Tuple(elts=[original_target, elm_target], ctx=ast.Store()),
            iter=ast.Call(func=enumerate_node, args=[node_iterable], keywords=[]),
            body=new_body,
            orelse=node.orelse,
        )
        new_node = ast.fix_missing_locations(ast.copy_location(new_node, node))
        new_node = self.generic_visit(new_node)
        return new_node

    @staticmethod
    def is_smelly(node: ast.For):
        # EAFP: any shape other than range(len(...)) raises AttributeError.
        try:
            return node.iter.func.id == "range" and node.iter.args[0].func.id == "len"
        except AttributeError:
            return False
/good_smell/smells/yield_from.py
from good_smell import AstSmell, LoggingTransformer import ast class YieldFrom(AstSmell): """Checks for yields inside for loops""" @property def transformer_class(self): return YieldFromTransformer @property def warning_message(self): return "Consider using yield from instead of yield inside of a for loop" @property def symbol(self): return "yield-from" class YieldFromTransformer(LoggingTransformer): """NodeTransformer that goes visits all the yields in fors and replaces them with yield from""" def visit_For(self, node: ast.For): yield_from = ast.Expr(value=ast.YieldFrom(node.iter)) return ast.fix_missing_locations(yield_from) @staticmethod def is_smelly(node: ast.AST): """Check if the node is a yield inside a for""" return ( isinstance(node, ast.For) and len(node.body) == 1 and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Yield) )
/tests/examples/example.py
#: example # example-symbol,another-one before = 0 before = 1 # ==> after = 0 after = 1 # END #: example # None before = 0 before = 1 # ==> after = 0 after = 1 # END
/tests/examples/filter.py
#: Move if to iterator # filter-iterator for i in range(10): if i == 2: print(1) print(2) # ==> for i in (x for x in range(10) if x == 2): print(1) print(2) # END #: Don't move if there's code before # None for i in range(10): print(1) if pred(i): print(2) # ==> for i in range(10): print(1) if pred(i): print(2) # END #: don't move if there's code after # None for i in range(10): if pred(i): print(1) print(2) # ==> for i in range(10): if pred(i): print(1) print(2) # END #: Merge into existing expr # filter-iterator for i in (a * 2 for a in range(2)): if pred(i): pass # ==> for i in (a * 2 for a in range(2) if pred(a * 2)): pass # END #: Merge into existing complex expr # filter-iterator for i in (f(a) * 2 for a in range(2)): if pred(i): pass # ==> for i in (f(a) * 2 for a in range(2) if pred(f(a) * 2)): pass # END
/tests/examples/join_literal.py
#: Warn when using join on a list of known literals. # join-literal a = "foo" b = "bar" ",".join([a, b]) # ==> a = "foo" b = "bar" "{},{}".format(a, b) # END #: Don't warn when joining an iterable # None iterable = ["a","b"] ",".join(iterable) # ==> iterable = ["a","b"] ",".join(iterable) # END #: Don't warn when joining a generator expression # None ",".join(str(i) for i in range(100)) # ==> ",".join(str(i) for i in range(100)) # END #: Don't warn when joining a list comprehension # None ",".join([str(i) for i in range(100)]) # ==> ",".join([str(i) for i in range(100)]) # END #: Don't warn when the list literal includes an unpacking # None ",".join([1,2,3,*a]) # ==> ",".join([1,2,3,*a]) # END
/tests/examples/nested_for.py
#: Flatten for-loops using nested comprehensions # nested-for for i in seq_a: for j in seq_b: print(i, j) # ==> for i, j in ((i, j) for i in seq_a for j in seq_b): print(i, j) # END #: Don't work if there's code between the loops (no way to know if it's unsafe) # None for i in seq_a: print(i) for j in seq_b: print(i, j) # ==> for i in seq_a: print(i) for j in seq_b: print(i, j) # END #: Don't work if there's code after the nested for # None for i in seq_a: for j in seq_b: print(i, j) print(i) # ==> for i in seq_a: for j in seq_b: print(i, j) print(i) # END #: Don't flatten a nested for with dependencies (#26) # None for num in range(1, 5): for digits in range(1, 10 ** num): pass # ==> for num in range(1, 5): for digits in range(1, 10 ** num): pass # END #: Check no errors with unpacking (#61) # None for i, num in enumerate(range(1, 5)): for digits in range(1, 10 ** num): pass # ==> for i, num in enumerate(range(1, 5)): for digits in range(1, 10 ** num): pass # END #: Check no errors with unpacking (#61), but also flatten # nested-for for i, j in enumerate(range(1, 5)): for digits in range(1, 10 ** num): pass # ==> for (i, j), digits in ( ((i, j), digits) for i, j in enumerate(range(1, 5)) for digits in range(1, 10 ** num) ): pass
/tests/examples/range_len.py
#: Range len instead of enumerate # range-len for i in range(len(sequence)): a = sequence[i] print(a) # ==> for i, a in enumerate(sequence): print(a) # END #: Replace an empty body with pass # range-len for i in range(len(sequence)): a = sequence[i] # ==> for i, a in enumerate(sequence): pass # END #: replaces access # range-len for i in range(len(sequence)): other_thing(sequence[i], i) # ==> for i, elm in enumerate(sequence): other_thing(elm, i) # END #: Multiple replaces # range-len for i in range(len(sequence)): x = sequence[i] do_thing(x, i) other_thing(sequence[i], i) # ==> for i, x in enumerate(sequence): do_thing(x, i) other_thing(x, i) # END #: Nested for # range-len for i in range(len(sequence)): x = sequence[i] for j in range(len(sequence)): do_thing(x, j) other_thing(sequence[i], i) # ==> for i, x in enumerate(sequence): for j, _ in enumerate(sequence): do_thing(x, j) other_thing(x, i) # END #: Replace unused var with _ # range-len for i in range(len(sequence)): do_thing(i) # ==> for i, _ in enumerate(sequence): do_thing(i) # END #: Don't remove an assign to something else # range-len for i in range(len(sequence)): a = 0 print(sequence[j]) # ==> for i, _ in enumerate(sequence): a = 0 print(sequence[j]) # END #: Behave correctly when used in the upper part of a slice # range-len for i in range(len(sequence)): print(sequence[1:i]) # ==> for i, _ in enumerate(sequence): print(sequence[1:i]) # END #: Don't replace access when used in the upper part of a slice # range-len for i in range(len(sequence)): print(sequence[i:1]) # ==> for i, _ in enumerate(sequence): print(sequence[i:1]) # END #: Don't replace access used in the upper part of a slice # range-len for i in range(len(sequence)): print(sequence[2:1]) # ==> for i, _ in enumerate(sequence): print(sequence[2:1]) # END
/tests/examples/yield_from.py
#: Use "yield from" instead of yield inside of a for loop # yield-from seq = range(10) for x in seq: yield x # ==> seq = range(10) yield from seq
/tests/test_collection.py
import ast
import itertools
from os import PathLike
from pathlib import Path
from typing import Iterator, NamedTuple, Set

import astor
import black
import pytest

from good_smell import fix_smell, smell_warnings

# Directory layout: the example fixture files live next to this test module.
FILE_DIR = Path(__file__).parent
EXAMPLES_DIR = FILE_DIR / "examples"


def normalize_formatting(code: str) -> str:
    """Returns a string of the code with normalized formatting for easier compares"""
    # Round-trip through the AST to drop comments/layout, then let black
    # impose one canonical formatting.
    code = astor.to_source(ast.parse(code))
    try:
        return black.format_file_contents(code, fast=True, mode=black.Mode())
    except black.NothingChanged:
        return code


class CollectedTest(NamedTuple):
    """One example case parsed from a fixture file."""

    desc: str  # human-readable title from the "#:" line
    error_symbols: Set[str]  # smell symbols expected to fire ("None" -> empty)
    before: str  # source code before fixing (annotation fixed: was ``int``)
    after: str  # expected source code after fixing


def is_title(line: str) -> bool:
    # A title line marks the start of a new example case.
    return line.startswith(TITLE_PREFIX)


# Markers of the fixture-file format:
#   #: <title>
#   # <symbol>[,<symbol>...] | None
#   <before code>
#   # ==>
#   <after code>
#   # END
TITLE_PREFIX = "#:"
BEFORE_AFTER_SPLITTER = "==>"
END_SYMBOL = "END"
SPECIAL_SYMBOLS = (TITLE_PREFIX, BEFORE_AFTER_SPLITTER, END_SYMBOL)


def collect_tests(path: PathLike) -> Iterator[CollectedTest]:
    """Collects all test cases listed in `path`"""
    with open(path) as fp:
        lines = fp.readlines()
    lines_iter = iter(lines)  # Create iterator for continued iteration
    # NOTE: the generator and the takewhile calls below all consume the SAME
    # iterator, so each section advances the cursor past its own markers.
    # ``line_num`` counts title lines, not file lines.
    for line_num, line in enumerate(line for line in lines_iter if is_title(line)):
        desc = line.strip("#:").strip()
        symbols_line = next(lines_iter).strip("#").strip()
        symbols = {symbol for symbol in symbols_line.split(",") if symbol != "None"}
        before = "".join(
            itertools.takewhile(lambda l: BEFORE_AFTER_SPLITTER not in l, lines_iter)
        )
        after = "".join(itertools.takewhile(lambda l: END_SYMBOL not in l, lines_iter))
        collected_test = CollectedTest(
            desc=desc, error_symbols=symbols, before=before, after=after
        )
        # Guard against malformed fixtures: no field may still contain a marker.
        if any(
            symbol in field
            for field, symbol in itertools.product(collected_test, SPECIAL_SYMBOLS)
        ):
            raise Exception(
                f"""Wrongly formatted example in {path}:{line_num}
{collected_test}"""
            )
        yield collected_test


def test_collect_tests():
    # Self-test of the collector against the reference fixture file.
    example_path = EXAMPLES_DIR / "example.py"
    collected_tests = list(collect_tests(example_path))
    assert len(collected_tests) == 2
    case_with_symbol, case_with_no_symbol = collected_tests
    assert case_with_symbol.desc == "example"
    assert case_with_symbol.error_symbols == {"example-symbol", "another-one"}
    assert case_with_symbol.before == """before = 0\nbefore = 1\n"""
    assert case_with_symbol.after == """after = 0\nafter = 1\n"""
    assert case_with_no_symbol.error_symbols == set()


# All real fixture files (the "example" file is only for test_collect_tests).
test_case_files = [f for f in EXAMPLES_DIR.iterdir() if "example" not in f.name]


def params_from_file():
    # One pytest param per collected case, id'ed "<file>:<title>".
    for file in test_case_files:
        yield from (
            pytest.param(
                case.before,
                case.after,
                case.error_symbols,
                id=f"{file.with_suffix('').name}:{case.desc}",
            )
            for case in collect_tests(file)
        )


@pytest.mark.parametrize(["before", "_", "symbols"], params_from_file())
def test_smell_warning(before, _, symbols):
    # The set of warned symbols must match the fixture's expectation exactly.
    assert set(symbols) == {smell.symbol for smell in smell_warnings(before)}


@pytest.mark.parametrize(["before", "after", "_"], list(params_from_file()))
def test_smell_fixing(before, after, _):
    # Compare modulo formatting: only the AST-level rewrite matters.
    assert normalize_formatting(fix_smell(before)) == normalize_formatting(after)
/tests/test_enumerate_fix.py
from good_smell import fix_smell from re import match import pytest valid_sources = [""" a = [0] for i in range(len(a)): print(a[i]) """, """ b = [1] for i in range(len(a + b)): print(i) """] @pytest.mark.parametrize("source", valid_sources) def test_range_len_fix(source): assert not match(r'for \w+ in range\(len\(.+\)\):', fix_smell(source))
/tests/test_no_transform.py
import itertools import ast from good_smell.smells import NestedFor def compare_ast(node1, node2): """Compare two ast, adapted from https://stackoverflow.com/a/30581854 to py3""" if type(node1) is not type(node2): return False if isinstance(node1, ast.AST): for k, v in vars(node1).items(): if k in ("lineno", "col_offset", "ctx"): continue if not compare_ast(v, getattr(node2, k)): return False return True elif isinstance(node1, list): return all(itertools.starmap(compare_ast, zip(node1, node2))) else: return node1 == node2 def test_no_transform(): source = """ seq_a = [0] seq_b = range(10) for i in seq_a: for j in seq_b: print(i, j)""" original_tree = ast.parse(source) tree = ast.parse(source) assert NestedFor(transform=False, path="test", tree=tree).check_for_smell() assert compare_ast(original_tree, tree)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
EricHughesABC/T2EPGviewer
refs/heads/master
{"/simple_pandas_plot.py": ["/ImageData.py"], "/visionplot_widgets.py": ["/ImageData.py"]}
└── ├── ImageData.py ├── azzT2paramsDialog.py ├── epgT2paramsDialog.py ├── mriplotwidget.py ├── simple_pandas_plot.py ├── t2fit.py └── visionplot_widgets.py
/ImageData.py
# -*- coding: utf-8 -*-
"""
Created on Tue Mar  6 14:55:05 2018

@author: ERIC
"""
import os

import numpy as np
import pandas as pd


class T2imageData():
    """Holds one study's T2/Dixon MRI images plus the fit-results CSVs.

    The study is expected to live in a directory tree of the form
    root/study/subject/session/region/protocol/results/roiType/fitModel;
    ``set_dataDir_and_results_filenames`` reverse-engineers that layout from
    a single results-file path.
    """

    def __init__(self):
        # Current viewer position
        self.currentSlice = None
        self.currentEcho = None
        # Directories of images and fit results for both protocols
        self.T2imagesDirpath = None
        self.dixonImagesDirpath = None
        self.dixonResultsDirpath = None
        self.T2resultsDirpath = None
        # Components of the study directory layout (filled from a results path)
        self.root = None
        self.studyName = None
        self.subject = None
        self.session = None
        self.imagedRegion = None
        self.protocol = None
        self.results = None
        self.roiType = None
        self.fitModel = None
        self.imagedRegionType = self.roiType  # alias of roiType (both None here)
        # Image files and their on-disk format ("nifti" or "analyze")
        self.T2imageType = None
        self.T2MRIimageFilenameAndPath = ""
        self.dixonImageType = None
        self.dixonMRIimageFilenameAndPath = ""
        # Results CSV files
        self.T2resultsFilenameAndPath = ""
        self.dixonResultsFilenameAndPath = ""
        self.fittingParam = "T2m"
        # T2 volume geometry (rows, cols, slices, echoes)
        self.numRowsT2 = None
        self.numColsT2 = None
        self.numSlicesT2 = None
        self.numEchoesT2 = None
        self.dixonSlices = None
        self.T2slices = None
        self.ImageDataT2 = None
        self.mriSliceIMG = None
        self.t2_data_summary_df = None
        self.dixon_data_summary_df = None

    def readin_alldata_from_results_filename(self, fn):
        """Derive every directory/file path of the study from one results file."""
        print("inside readin_alldata_from_results_filename")
        self.set_dataDir_and_results_filenames(fn)
        self.set_T2imageData_filename_and_type()
        self.set_dixonImageData_filename_and_type()
        print("T2resultsDirpath :: ", self.T2resultsDirpath)
        print("dixonResultsDirpath :: ", self.dixonResultsDirpath)
        print("T2imagesDirpath :: ", self.T2imagesDirpath)
        print("dixonImagesDirpath :: ", self.dixonImagesDirpath)
        print("T2imageType :: ", self.T2imageType)
        print("T2MRIimageFilenameAndPath :: ", self.T2MRIimageFilenameAndPath)
        print("dixonImageType :: ", self.dixonImageType)
        print("dixonMRIimageFilenameAndPath ::", self.dixonMRIimageFilenameAndPath)
        print("T2resultsFilenameAndPath :: ", self.T2resultsFilenameAndPath)
        print("dixonResultsFilenameAndPath :: ", self.dixonResultsFilenameAndPath)

    def set_T2imageData_filename_and_type(self):
        """Searches for image data in directory can be nifti or analyze
        sets the type and filename"""
        print("inside set_T2imageData_filename_and_type")
        print("self.T2imagesDirpath", self.T2imagesDirpath)
        if self.T2imagesDirpath is None:  # was "== None"
            self.T2imageType = None
            return False
        imgFilenameList = [
            os.path.join(self.T2imagesDirpath, fn)
            for fn in os.listdir(self.T2imagesDirpath)
            if "nii" in fn or "img" in fn
        ]
        if len(imgFilenameList) == 0:
            self.T2imageType = None
            self.T2MRIimageFilenameAndPath = None
            return False
        # Take the first matching file; "nii" anywhere in the name means NIfTI.
        self.T2MRIimageFilenameAndPath = imgFilenameList[0]
        if "nii" in self.T2MRIimageFilenameAndPath:
            self.T2imageType = "nifti"
        else:
            self.T2imageType = "analyze"
        return True

    def set_dixonImageData_filename_and_type(self):
        """Searches for image data in directory can be nifti or analyze
        sets the type and filename filename must have fatPC. in it"""
        print("inside set_dixonImageData_filename_and_type")
        print("self.dixonImagesDirpath", self.dixonImagesDirpath)
        if self.dixonImagesDirpath is None:
            # BUG FIX: this assigned ``self.dionImageType`` (typo), leaving
            # dixonImageType stale when the directory is missing.
            self.dixonImageType = None
            return False
        imgFilenameList = [
            os.path.join(self.dixonImagesDirpath, fn)
            for fn in os.listdir(self.dixonImagesDirpath)
            if "fatPC." in fn and ("nii" in fn or "img" in fn)
        ]
        if len(imgFilenameList) == 0:
            self.dixonImageType = None
            self.dixonMRIimageFilenameAndPath = None
            return False
        self.dixonMRIimageFilenameAndPath = imgFilenameList[0]
        if "nii" in self.dixonMRIimageFilenameAndPath:
            self.dixonImageType = "nifti"
        else:
            self.dixonImageType = "analyze"
        return True

    def set_results_dir(self, protocol, resultsDir):
        """Locate the results directory of the *other* protocol.

        Tries root/.../protocol/results/roiType/fitModel first; if only the
        roiType level exists, falls back to its first fit-model subdirectory.
        Returns (resultsDir unchanged, found path or None).
        """
        resultsDirpath = None
        dirpath = os.path.join(self.root, self.studyName, self.subject, self.session,
                               self.imagedRegion, protocol,
                               self.results, self.roiType, self.fitModel)
        if os.path.exists(dirpath):
            resultsDirpath = dirpath
        else:
            dirpath = os.path.join(self.root, self.studyName, self.subject, self.session,
                                   self.imagedRegion, protocol,
                                   self.results, self.roiType)
            if os.path.exists(dirpath):
                fitModels = [f for f in os.listdir(dirpath)]
                if len(fitModels) > 0:
                    resultsDirpath = os.path.join(dirpath, fitModels[0])
        return resultsDir, resultsDirpath

    def set_dataDir_and_results_filenames(self, fn):
        """Split a results-file path into the study-layout components and
        resolve the image/results directories for both protocols."""
        print("inside set_dataDir_and_results_filenames")
        print("fn", fn)
        resultsDir, resultsFilename = os.path.split(fn)
        print("resultsDir", resultsDir)
        print("resultsFilename", resultsFilename)
        resultsDirList = resultsDir.split(os.path.sep)
        print("resultsDirList", resultsDirList)
        # The session directory ("sess...") anchors the layout.
        sessionIndex = [i for i, w in enumerate(resultsDirList) if "sess" in w]
        print("sessionIndex", sessionIndex)
        if len(sessionIndex):
            si = sessionIndex[0]
            print("si", si)
            print("resultsDirList", resultsDirList)
            print("resultsDirList[0]", resultsDirList[0])
            if len(resultsDirList[0]) > 0:
                if ":" == resultsDirList[0][-1]:
                    # add path seperator if root ends in : (Windows drive)
                    resultsDirList[0] = resultsDirList[0] + os.path.sep
                    print("resultsDirList[0]", resultsDirList[0])
            self.root = os.path.sep.join(resultsDirList[:si - 2])
            self.studyName = resultsDirList[si - 2]
            self.subject = resultsDirList[si - 1]
            self.session = resultsDirList[si]
            self.imagedRegion = resultsDirList[si + 1]
            self.protocol = resultsDirList[si + 2]
            self.results = resultsDirList[si + 3]
            # (removed a useless local alias "imagedRegionType" here)
            self.roiType = resultsDirList[si + 4]
            self.fitModel = resultsDirList[si + 5]
            print("self.root", self.root)

            ## T2 image path
            dirpath = os.path.join(self.root, self.studyName, self.subject,
                                   self.session, self.imagedRegion, "T2")
            if os.path.exists(dirpath):
                self.T2imagesDirpath = dirpath

            ## dixon image path
            dirpath = os.path.join(self.root, self.studyName, self.subject,
                                   self.session, self.imagedRegion, "dixon")
            if os.path.exists(dirpath):
                self.dixonImagesDirpath = dirpath

            ## set T2 and dixon results path; fn belongs to one protocol,
            ## the other protocol's directory has to be searched for.
            if self.protocol.lower() == "t2":
                self.T2resultsDirpath, self.dixonResultsDirpath = \
                    self.set_results_dir("dixon", resultsDir)
            elif self.protocol.lower() == "dixon":
                self.dixonResultsDirpath, self.T2resultsDirpath = \
                    self.set_results_dir("T2", resultsDir)
            print("self.dixonResultsDirpath", self.dixonResultsDirpath)
            print("self.T2resultsDirpath", self.T2resultsDirpath)

            ## set csv results path name for T2 and dixon
            if "T2".lower() in fn.lower():
                self.T2resultsFilenameAndPath = fn
                resultFilenameList = [
                    os.path.join(self.dixonResultsDirpath, fi)
                    for fi in os.listdir(self.dixonResultsDirpath)
                    if "results." in fi.lower() and (".csv" in fi.lower())
                ]
                if resultFilenameList:
                    self.dixonResultsFilenameAndPath = resultFilenameList[0]
            elif "dixon" in fn.lower():
                self.dixonResultsFilenameAndPath = fn
                # BUG FIX: listed ``self.T2ResultsDirpath`` (wrong case) which
                # raised AttributeError — the attribute is T2resultsDirpath.
                resultFilenameList = [
                    os.path.join(self.T2resultsDirpath, fi)
                    for fi in os.listdir(self.T2resultsDirpath)
                    if "results." in fi.lower() and (".csv" in fi.lower())
                ]
                if resultFilenameList:
                    self.T2resultsFilenameAndPath = resultFilenameList[0]

    def read_T2_data(self):
        """Read the T2 results CSV; returns True on success."""
        print("read_T2_data function entered")
        print("self.T2resultsFilenameAndPath", self.T2resultsFilenameAndPath)
        if os.path.exists(self.T2resultsFilenameAndPath):
            print(self.T2resultsFilenameAndPath, "exists")
            self.t2_data_summary_df = pd.read_csv(self.T2resultsFilenameAndPath)
            self.T2slices = list(self.t2_data_summary_df["slice"].unique())
            return True
        print(self.T2resultsFilenameAndPath, "not Found")
        return False

    def read_Dixon_data(self):
        """Read the Dixon results CSV; returns True on success.

        On failure an empty DataFrame is installed so later column lookups
        don't crash.
        """
        print("read_Dixon_data function entered")
        print("self.dixonResultsFilenameAndPath", self.dixonResultsFilenameAndPath)
        if os.path.exists(self.dixonResultsFilenameAndPath):
            print(self.dixonResultsFilenameAndPath, "exists")
            self.dixon_data_summary_df = pd.read_csv(self.dixonResultsFilenameAndPath)
            self.dixonSlices = list(self.dixon_data_summary_df["slice"].unique())
            return True
        print(self.dixonResultsFilenameAndPath, "not Found")
        self.dixon_data_summary_df = pd.DataFrame()
        return False

    def read_T2_img_hdr_files(self):
        """Load the T2 image volume from disk; returns True on success."""
        # Imported lazily: nibabel is only needed here, so the results CSVs
        # can be browsed without it being installed.
        import nibabel
        if os.path.exists(self.T2MRIimageFilenameAndPath):
            print(self.T2MRIimageFilenameAndPath, " found")
            self.t2_imghdr = nibabel.load(self.T2MRIimageFilenameAndPath)
            # NOTE(review): get_data() is deprecated in newer nibabel in
            # favour of get_fdata() (which changes the dtype) — confirm
            # before switching.
            image_data = self.t2_imghdr.get_data()
            # Re-orient: swap row/col axes then flip vertically for display.
            image_data = np.flipud(image_data.swapaxes(1, 0))
            self.update_imageDataT2(image_data)
            [self.numRowsT2, self.numColsT2,
             self.numSlicesT2, self.numEchoesT2] = self.ImageDataT2.shape
            self.mriSliceIMG = np.zeros((self.numRowsT2, self.numColsT2),
                                        dtype=np.double)
            # Show slice 0 / echo 0 initially ("* 1.0" forces a float copy).
            self.mriSliceIMG = self.ImageDataT2[:, :, 0, 0] * 1.0
            self.currentEcho = 0
            self.currentSlice = 0
            return True
        return False

    def update_imageDataT2(self, imageData):
        """Install a new 4-D (rows, cols, slices, echoes) T2 volume."""
        self.ImageDataT2 = imageData

    def overlayRoisOnImage(self, slice_pos, roi_data):
        """Build ``self.maskedROIs``: the `roi_data` column of the matching
        results frame scattered onto the image grid, with zeros masked out."""
        print("Entering overlayRoisOnImage", slice_pos)
        print("roi_data", roi_data)
        if roi_data in self.t2_data_summary_df.columns:
            roi_image_layer = np.zeros(self.numRowsT2 * self.numColsT2)
            t2_data_query_df = self.t2_data_summary_df.query(
                'slice == {}'.format(str(slice_pos)))
            roi_image_layer[t2_data_query_df.pixel_index] = t2_data_query_df[roi_data]
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
        elif roi_data in self.dixon_data_summary_df.columns:
            # Map the T2 slice number onto the corresponding Dixon slice.
            if slice_pos in self.T2slices:
                dixon_slice = self.dixonSlices[self.T2slices.index(slice_pos)]
            else:
                dixon_slice = slice_pos
            roi_image_layer = np.zeros(self.numRowsT2 * self.numColsT2)
            dixon_data_query_df = self.dixon_data_summary_df.query(
                'slice == {}'.format(str(dixon_slice)))
            roi_image_layer[dixon_data_query_df.pixel_index] = \
                dixon_data_query_df[roi_data]
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
        else:
            # Unknown column: everything masked (empty overlay).
            roi_image_layer = np.zeros(self.numRowsT2 * self.numColsT2)
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
/azzT2paramsDialog.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'azz_fit_parameters_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class AzzT2paramsDialog(object):
    """Dialog for editing the Azzabou T2-model fit parameters.

    Reads/writes the lmfit Parameters stored under ``lmparams['azzt2fitparams']``.
    """

    def __init__(self, lmparams):
        self.lmparams = lmparams
        self.params = self.lmparams['azzt2fitparams']

    def setupAzzT2paramsDialog(self, Dialog):
        """Build the widget tree (generated layout code)."""
        self.dialog = Dialog
        Dialog.setObjectName("Azzabou")
        Dialog.resize(398, 335)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(230, 280, 156, 23))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.layoutWidget = QtWidgets.QWidget(Dialog)
        self.layoutWidget.setGeometry(QtCore.QRect(20, 10, 361, 252))
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.label_11 = QtWidgets.QLabel(self.layoutWidget)
        self.label_11.setObjectName("label_11")
        self.gridLayout.addWidget(self.label_11, 7, 0, 1, 1)
        self.label_12 = QtWidgets.QLabel(self.layoutWidget)
        self.label_12.setObjectName("label_12")
        self.gridLayout.addWidget(self.label_12, 8, 0, 1, 1)
        self.echoTimeValue = QtWidgets.QLineEdit(self.layoutWidget)
        self.echoTimeValue.setValidator(QtGui.QDoubleValidator())
        self.echoTimeValue.setObjectName("echoTimeValue")
        self.gridLayout.addWidget(self.echoTimeValue, 8, 1, 1, 1)
        self.longFatT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.longFatT2value.setValidator(QtGui.QDoubleValidator())
        self.longFatT2value.setObjectName("longFatT2value")
        self.gridLayout.addWidget(self.longFatT2value, 6, 1, 1, 1)
        self.shortFatT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.shortFatT2value.setValidator(QtGui.QDoubleValidator())
        self.shortFatT2value.setObjectName("shortFatT2value")
        self.gridLayout.addWidget(self.shortFatT2value, 7, 1, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.layoutWidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 2, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.layoutWidget)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 0, 3, 1, 1)
        self.muscleT2minimum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleT2minimum.setValidator(QtGui.QDoubleValidator())
        self.muscleT2minimum.setObjectName("muscleT2minimum")
        self.gridLayout.addWidget(self.muscleT2minimum, 1, 2, 1, 1)
        self.fatFractionMinimum = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionMinimum.setValidator(QtGui.QDoubleValidator())
        self.fatFractionMinimum.setObjectName("fatFractionMinimum")
        self.gridLayout.addWidget(self.fatFractionMinimum, 3, 2, 1, 1)
        self.fatFractionMaximum = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionMaximum.setValidator(QtGui.QDoubleValidator())
        self.fatFractionMaximum.setObjectName("fatFractionMaximum")
        self.gridLayout.addWidget(self.fatFractionMaximum, 3, 3, 1, 1)
        self.muscleFractionMinimum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleFractionMinimum.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionMinimum.setObjectName("muscleFractionMinimum")
        self.gridLayout.addWidget(self.muscleFractionMinimum, 2, 2, 1, 1)
        self.optimizeMuscleFraction = QtWidgets.QCheckBox(self.layoutWidget)
        self.optimizeMuscleFraction.setText("")
        self.optimizeMuscleFraction.setChecked(True)
        self.optimizeMuscleFraction.setObjectName("optimizeMuscleFraction")
        self.gridLayout.addWidget(self.optimizeMuscleFraction, 2, 4, 1, 1)
        self.muscleFractionMaximum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleFractionMaximum.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionMaximum.setObjectName("muscleFractionMaximum")
        self.gridLayout.addWidget(self.muscleFractionMaximum, 2, 3, 1, 1)
        self.optimizeFatFraction = QtWidgets.QCheckBox(self.layoutWidget)
        self.optimizeFatFraction.setText("")
        self.optimizeFatFraction.setChecked(True)
        self.optimizeFatFraction.setObjectName("optimizeFatFraction")
        self.gridLayout.addWidget(self.optimizeFatFraction, 3, 4, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.layoutWidget)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.layoutWidget)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 4, 0, 1, 1)
        self.optimizeMuscleT2 = QtWidgets.QCheckBox(self.layoutWidget)
        self.optimizeMuscleT2.setText("")
        self.optimizeMuscleT2.setChecked(True)
        self.optimizeMuscleT2.setObjectName("optimizeMuscleT2")
        self.gridLayout.addWidget(self.optimizeMuscleT2, 1, 4, 1, 1)
        self.fatFractionLongT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionLongT2value.setValidator(QtGui.QDoubleValidator())
        self.fatFractionLongT2value.setObjectName("fatFractionLongT2value")
        self.gridLayout.addWidget(self.fatFractionLongT2value, 4, 1, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 0, 4, 1, 1)
        self.muscleT2value = QtWidgets.QLineEdit(self.layoutWidget)
        # BUG FIX: muscleT2value was the only numeric field without a
        # validator, allowing non-numeric input to reach float() later.
        self.muscleT2value.setValidator(QtGui.QDoubleValidator())
        self.muscleT2value.setObjectName("muscleT2value")
        self.gridLayout.addWidget(self.muscleT2value, 1, 1, 1, 1)
        self.fatFractionShortT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionShortT2value.setValidator(QtGui.QDoubleValidator())
        self.fatFractionShortT2value.setObjectName("fatFractionShortT2value")
        self.gridLayout.addWidget(self.fatFractionShortT2value, 5, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.layoutWidget)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.layoutWidget)
        self.label_9.setObjectName("label_9")
        self.gridLayout.addWidget(self.label_9, 5, 0, 1, 1)
        self.muscleT2maximum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleT2maximum.setValidator(QtGui.QDoubleValidator())
        self.muscleT2maximum.setObjectName("muscleT2maximum")
        self.gridLayout.addWidget(self.muscleT2maximum, 1, 3, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.layoutWidget)
        self.label_10.setObjectName("label_10")
        self.gridLayout.addWidget(self.label_10, 6, 0, 1, 1)
        self.muscleFractionValue = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleFractionValue.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionValue.setObjectName("muscleFractionValue")
        self.gridLayout.addWidget(self.muscleFractionValue, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.layoutWidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
        self.fatFractionValue = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionValue.setValidator(QtGui.QDoubleValidator())
        self.fatFractionValue.setObjectName("fatFractionValue")
        self.gridLayout.addWidget(self.fatFractionValue, 3, 1, 1, 1)

        self.retranslateUi(Dialog)
        # OK goes through dialog_ok_clicked so the parameters are validated
        # before the dialog is accepted.
        self.buttonBox.accepted.connect(self.dialog_ok_clicked)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all user-visible strings (generated code)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Azzabou", "Azzabout T2 model"))
        self.label_11.setText(_translate("Azzabou", "Short Fat T<sub>2</sub> (ms)"))
        self.label_12.setText(_translate("Azzabou", "Echo Time (ms)"))
        self.echoTimeValue.setText(_translate("Azzabou", "10.0"))
        self.longFatT2value.setText(_translate("Azzabou", "250.0"))
        self.shortFatT2value.setText(_translate("Azzabou", "43.0"))
        self.label_2.setText(_translate("Azzabou", "minimum"))
        self.label_3.setText(_translate("Azzabou", "maximum"))
        self.muscleT2minimum.setText(_translate("Azzabou", "0.0"))
        self.fatFractionMinimum.setText(_translate("Azzabou", "0.0"))
        self.fatFractionMaximum.setText(_translate("Azzabou", "10.0"))
        self.muscleFractionMinimum.setText(_translate("Azzabou", "0.0"))
        self.muscleFractionMaximum.setText(_translate("Azzabou", "10.0"))
        self.label_7.setText(_translate("Azzabou", "Fat Fraction"))
        self.label_8.setText(_translate("Azzabou", "Fat Fraction (Long T<sub>2</sub>)"))
        self.fatFractionLongT2value.setText(_translate("Azzabou", "0.6"))
        self.label_4.setText(_translate("Azzabou", "optimized"))
        self.muscleT2value.setText(_translate("Azzabou", "35.0"))
        self.fatFractionShortT2value.setText(_translate("Azzabou", "0.4"))
        self.label_5.setText(_translate("Azzabou", "Muscle T<sub>2</sub> (ms)"))
        self.label_6.setText(_translate("Azzabou", "Muscle Fraction"))
        self.label_9.setText(_translate("Azzabou", "Fat Fraction (Short T<sub>2</sub>)"))
        self.muscleT2maximum.setText(_translate("Azzabou", "100.0"))
        self.label_10.setText(_translate("Azzabou", "Long Fat T<sub>2</sub> (ms)"))
        self.muscleFractionValue.setText(_translate("Azzabou", "0.8"))
        self.label.setText(_translate("Azzabou", "value"))
        self.fatFractionValue.setText(_translate("Azzabou", "0.2"))

    def dialog_ok_clicked(self):
        """Accept the dialog only if the entered parameters are usable."""
        print("dialog_ok_clicked")
        self.dialog.setResult(1)
        worked = self.get_fitparameters()
        if worked:
            self.params.pretty_print()
            self.dialog.accept()

    def get_fitparameters(self):
        """Copy the widget values into the lmfit Parameters.

        Returns False when a value cannot be parsed or when every optimize
        checkbox is unchecked (nothing would be fitted).
        """
        print("self.optimizeFatFraction.isChecked()", self.optimizeFatFraction.isChecked())
        worked = True
        try:
            self.params.add(name='T2muscle', value=float(self.muscleT2value.text()),
                            min=float(self.muscleT2minimum.text()),
                            max=float(self.muscleT2maximum.text()),
                            vary=self.optimizeMuscleT2.isChecked())
            self.params.add(name='Amuscle', value=float(self.muscleFractionValue.text()),
                            min=float(self.muscleFractionMinimum.text()),
                            max=float(self.muscleFractionMaximum.text()),
                            vary=self.optimizeMuscleFraction.isChecked())
            self.params.add(name='Afat', value=float(self.fatFractionValue.text()),
                            min=float(self.fatFractionMinimum.text()),
                            max=float(self.fatFractionMaximum.text()),
                            vary=self.optimizeFatFraction.isChecked())
            # Fixed (non-varying) model constants
            self.params.add(name='c_l', value=float(self.fatFractionLongT2value.text()), vary=False)
            self.params.add(name='c_s', value=float(self.fatFractionShortT2value.text()), vary=False)
            self.params.add(name='t2_fl', value=float(self.longFatT2value.text()), vary=False)
            self.params.add(name='t2_fs', value=float(self.shortFatT2value.text()), vary=False)
            self.params.add(name='echo', value=float(self.echoTimeValue.text()), vary=False)
            buttonsUnChecked = [not self.optimizeFatFraction.isChecked(),
                                not self.optimizeMuscleFraction.isChecked(),
                                not self.optimizeMuscleT2.isChecked()]
            print(buttonsUnChecked)
            if all(buttonsUnChecked):
                print("all buttuns unchecked")
                worked = False
            self.lmparams['azzt2fitparams'] = self.params
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers float()/widget errors.
        except Exception:
            print("exception occurred")
            worked = False
        return worked


if __name__ == "__main__":
    import sys
    import lmfit as lm

    # Stand-alone demo: build default EPG and Azzabou parameter sets and
    # show the dialog.
    lmparams = {}
    epgt2fitparams = lm.Parameters()
    azzt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value=180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value=35, min=0, max=100, vary=True)
    epgt2fitparams.add('Afat', value=0.20, min=0, max=10, vary=True)
    epgt2fitparams.add('Amuscle', value=0.80, min=0, max=10, vary=True)
    epgt2fitparams.add('T1fat', value=365.0, vary=False)
    epgt2fitparams.add('T1muscle', value=1400, vary=False)
    epgt2fitparams.add('echo', value=10.0, vary=False)

    azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
                            ('Amuscle', 40.0, True, 0, 250, None),
                            ('T2muscle', 40.0, True, 0, 100, None),
                            ('c_l', 0.55, False, 0, 2000, None),
                            ('c_s', 0.45, False, 0, 2000, None),
                            ('t2_fl', 250.0, False, 0, 2000, None),
                            ('t2_fs', 43.0, False, 0, 2000, None),
                            ('echo', 10.0, False, 0, 2000, None))

    lmparams['epgt2fitparams'] = epgt2fitparams
    lmparams['azzt2fitparams'] = azzt2fitparams

    app = QtWidgets.QApplication(sys.argv)
    Azzabou = QtWidgets.QDialog()
    ui = AzzT2paramsDialog(lmparams)
    ui.setupAzzT2paramsDialog(Azzabou)
    Azzabou.show()
    sys.exit(app.exec_())
/epgT2paramsDialog.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'epg_fit_parameters_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!

import lmfit as lm
from PyQt5 import QtCore, QtGui, QtWidgets


class EpgT2paramsDialog(object):
    """Dialog for editing the EPG T2-fit parameters (values/bounds/vary flags).

    The edited lmfit.Parameters object is stored back into
    lmparams['epgt2fitparams'] when the user accepts the dialog.
    """

    def __init__(self, lmparams):
        self.lmparams = lmparams
        self.params = self.lmparams['epgt2fitparams']

    def setupEpgT2paramsDialog(self, Dialog):
        """Build the dialog UI (generated from the .ui file; grid of
        value/min/max line edits plus 'optimized' checkboxes)."""
        self.Dialog = Dialog
        Dialog.setObjectName("Dialog")
        Dialog.resize(386, 284)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(60, 250, 321, 23))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.widget = QtWidgets.QWidget(Dialog)
        self.widget.setGeometry(QtCore.QRect(20, 10, 361, 231))
        self.widget.setObjectName("widget")
        self.gridLayout = QtWidgets.QGridLayout(self.widget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.fatT1value = QtWidgets.QLineEdit(self.widget)
        self.fatT1value.setValidator(QtGui.QDoubleValidator())
        self.fatT1value.setObjectName("fatT1value")
        self.gridLayout.addWidget(self.fatT1value, 7, 1, 1, 1)
        self.muscleFractionMax = QtWidgets.QLineEdit(self.widget)
        self.muscleFractionMax.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionMax.setObjectName("muscleFractionMax")
        self.gridLayout.addWidget(self.muscleFractionMax, 3, 3, 1, 1)
        self.optimizeMuscleFraction = QtWidgets.QCheckBox(self.widget)
        self.optimizeMuscleFraction.setText("")
        self.optimizeMuscleFraction.setChecked(True)
        self.optimizeMuscleFraction.setObjectName("optimizeMuscleFraction")
        self.gridLayout.addWidget(self.optimizeMuscleFraction, 3, 4, 1, 1)
        self.fatFractionMin = QtWidgets.QLineEdit(self.widget)
        self.fatFractionMin.setValidator(QtGui.QDoubleValidator())
        self.fatFractionMin.setObjectName("fatFractionMin")
        self.gridLayout.addWidget(self.fatFractionMin, 4, 2, 1, 1)
        self.fatFractionMax = QtWidgets.QLineEdit(self.widget)
        self.fatFractionMax.setValidator(QtGui.QDoubleValidator())
        self.fatFractionMax.setObjectName("fatFractionMax")
        self.gridLayout.addWidget(self.fatFractionMax, 4, 3, 1, 1)
        self.b1scaleMax = QtWidgets.QLineEdit(self.widget)
        self.b1scaleMax.setValidator(QtGui.QDoubleValidator())
        self.b1scaleMax.setObjectName("b1scaleMax")
        self.gridLayout.addWidget(self.b1scaleMax, 5, 3, 1, 1)
        self.muscleFractionMin = QtWidgets.QLineEdit(self.widget)
        self.muscleFractionMin.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionMin.setObjectName("muscleFractionMin")
        self.gridLayout.addWidget(self.muscleFractionMin, 3, 2, 1, 1)
        self.b1scaleValue = QtWidgets.QLineEdit(self.widget)
        self.b1scaleValue.setValidator(QtGui.QDoubleValidator())
        self.b1scaleValue.setObjectName("b1scaleValue")
        self.gridLayout.addWidget(self.b1scaleValue, 5, 1, 1, 1)
        self.b1scaleMin = QtWidgets.QLineEdit(self.widget)
        self.b1scaleMin.setValidator(QtGui.QDoubleValidator())
        self.b1scaleMin.setObjectName("b1scaleMin")
        self.gridLayout.addWidget(self.b1scaleMin, 5, 2, 1, 1)
        self.fatFractionLabel = QtWidgets.QLabel(self.widget)
        self.fatFractionLabel.setObjectName("fatFractionLabel")
        self.gridLayout.addWidget(self.fatFractionLabel, 4, 0, 1, 1)
        self.fatFractionValue = QtWidgets.QLineEdit(self.widget)
        self.fatFractionValue.setValidator(QtGui.QDoubleValidator())
        self.fatFractionValue.setObjectName("fatFractionValue")
        self.gridLayout.addWidget(self.fatFractionValue, 4, 1, 1, 1)
        self.muscleT1label = QtWidgets.QLabel(self.widget)
        self.muscleT1label.setObjectName("muscleT1label")
        self.gridLayout.addWidget(self.muscleT1label, 6, 0, 1, 1)
        self.fatT2min = QtWidgets.QLineEdit(self.widget)
        self.fatT2min.setValidator(QtGui.QDoubleValidator())
        self.fatT2min.setObjectName("fatT2min")
        self.gridLayout.addWidget(self.fatT2min, 2, 2, 1, 1)
        self.maxHeadingLabel = QtWidgets.QLabel(self.widget)
        self.maxHeadingLabel.setObjectName("maxHeadingLabel")
        self.gridLayout.addWidget(self.maxHeadingLabel, 0, 3, 1, 1)
        self.minHeadingLabel = QtWidgets.QLabel(self.widget)
        self.minHeadingLabel.setObjectName("minHeadingLabel")
        self.gridLayout.addWidget(self.minHeadingLabel, 0, 2, 1, 1)
        self.valueHeadingLabel = QtWidgets.QLabel(self.widget)
        self.valueHeadingLabel.setObjectName("valueHeadingLabel")
        self.gridLayout.addWidget(self.valueHeadingLabel, 0, 1, 1, 1)
        self.fatT2value = QtWidgets.QLineEdit(self.widget)
        self.fatT2value.setValidator(QtGui.QDoubleValidator())
        self.fatT2value.setObjectName("fatT2value")
        self.gridLayout.addWidget(self.fatT2value, 2, 1, 1, 1)
        self.optimizeFatT2 = QtWidgets.QCheckBox(self.widget)
        self.optimizeFatT2.setText("")
        self.optimizeFatT2.setChecked(False)
        self.optimizeFatT2.setObjectName("optimizeFatT2")
        self.gridLayout.addWidget(self.optimizeFatT2, 2, 4, 1, 1)
        self.muscleT2value = QtWidgets.QLineEdit(self.widget)
        self.muscleT2value.setInputMethodHints(QtCore.Qt.ImhDigitsOnly | QtCore.Qt.ImhFormattedNumbersOnly)
        self.muscleT2value.setProperty("muscleValue", 0.0)
        self.muscleT2value.setProperty("number", 35.0)
        self.muscleT2value.setObjectName("muscleT2value")
        self.gridLayout.addWidget(self.muscleT2value, 1, 1, 1, 1)
        self.fatT2label = QtWidgets.QLabel(self.widget)
        self.fatT2label.setObjectName("fatT2label")
        self.gridLayout.addWidget(self.fatT2label, 2, 0, 1, 1)
        self.fatT2max = QtWidgets.QLineEdit(self.widget)
        self.fatT2max.setValidator(QtGui.QDoubleValidator())
        self.fatT2max.setObjectName("fatT2max")
        self.gridLayout.addWidget(self.fatT2max, 2, 3, 1, 1)
        self.muscleT2max = QtWidgets.QLineEdit(self.widget)
        self.muscleT2max.setValidator(QtGui.QDoubleValidator())
        self.muscleT2max.setObjectName("muscleT2max")
        self.gridLayout.addWidget(self.muscleT2max, 1, 3, 1, 1)
        self.opimizedHeadingLabel = QtWidgets.QLabel(self.widget)
        self.opimizedHeadingLabel.setObjectName("opimizedHeadingLabel")
        self.gridLayout.addWidget(self.opimizedHeadingLabel, 0, 4, 1, 1)
        self.muscleT2label = QtWidgets.QLabel(self.widget)
        self.muscleT2label.setObjectName("muscleT2label")
        self.gridLayout.addWidget(self.muscleT2label, 1, 0, 1, 1)
        self.muscleT2min = QtWidgets.QLineEdit(self.widget)
        self.muscleT2min.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly)
        self.muscleT2min.setObjectName("muscleT2min")
        self.gridLayout.addWidget(self.muscleT2min, 1, 2, 1, 1)
        self.optimizeMuscleT2 = QtWidgets.QCheckBox(self.widget)
        self.optimizeMuscleT2.setText("")
        self.optimizeMuscleT2.setChecked(True)
        self.optimizeMuscleT2.setObjectName("optimizeMuscleT2")
        self.gridLayout.addWidget(self.optimizeMuscleT2, 1, 4, 1, 1)
        self.optimizeB1scale = QtWidgets.QCheckBox(self.widget)
        self.optimizeB1scale.setText("")
        self.optimizeB1scale.setChecked(True)
        self.optimizeB1scale.setObjectName("optimizeB1scale")
        self.gridLayout.addWidget(self.optimizeB1scale, 5, 4, 1, 1)
        self.optimizeFatFraction = QtWidgets.QCheckBox(self.widget)
        self.optimizeFatFraction.setText("")
        self.optimizeFatFraction.setChecked(True)
        self.optimizeFatFraction.setObjectName("optimizeFatFraction")
        self.gridLayout.addWidget(self.optimizeFatFraction, 4, 4, 1, 1)
        self.b1scaleLabel = QtWidgets.QLabel(self.widget)
        self.b1scaleLabel.setObjectName("b1scaleLabel")
        self.gridLayout.addWidget(self.b1scaleLabel, 5, 0, 1, 1)
        self.muscleT1value = QtWidgets.QLineEdit(self.widget)
        self.muscleT1value.setObjectName("muscleT1value")
        self.gridLayout.addWidget(self.muscleT1value, 6, 1, 1, 1)
        self.T2echoValue = QtWidgets.QLineEdit(self.widget)
        self.T2echoValue.setValidator(QtGui.QDoubleValidator())
        self.T2echoValue.setObjectName("T2echoValue")
        self.gridLayout.addWidget(self.T2echoValue, 8, 1, 1, 1)
        self.muscleFractionValue = QtWidgets.QLineEdit(self.widget)
        self.muscleFractionValue.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionValue.setObjectName("muscleFractionValue")
        self.gridLayout.addWidget(self.muscleFractionValue, 3, 1, 1, 1)
        self.muscleFractionLabel = QtWidgets.QLabel(self.widget)
        self.muscleFractionLabel.setObjectName("muscleFractionLabel")
        self.gridLayout.addWidget(self.muscleFractionLabel, 3, 0, 1, 1)
        self.label = QtWidgets.QLabel(self.widget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 8, 0, 1, 1)
        self.fatT1label = QtWidgets.QLabel(self.widget)
        self.fatT1label.setObjectName("fatT1label")
        self.gridLayout.addWidget(self.fatT1label, 7, 0, 1, 1)

        self.retranslateUi(Dialog)
        # OK goes through our own handler so invalid input can keep the dialog open.
        self.buttonBox.accepted.connect(self.dialog_ok_clicked)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set default texts (generated by the PyQt5 UI compiler)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "EPG"))
        self.fatT1value.setText(_translate("Dialog", "1450"))
        self.muscleFractionMax.setText(_translate("Dialog", "10"))
        self.fatFractionMin.setText(_translate("Dialog", "0"))
        self.fatFractionMax.setText(_translate("Dialog", "10"))
        self.b1scaleMax.setText(_translate("Dialog", "2"))
        self.muscleFractionMin.setText(_translate("Dialog", "0"))
        self.b1scaleValue.setText(_translate("Dialog", "1"))
        self.b1scaleMin.setText(_translate("Dialog", "0"))
        self.fatFractionLabel.setText(_translate("Dialog", "Fat Fraction"))
        self.fatFractionValue.setText(_translate("Dialog", ".3"))
        self.muscleT1label.setText(_translate("Dialog", "<html><head/><body><p>Muscle T<span style=\" vertical-align:sub;\">1</span> (ms)</p></body></html>"))
        self.fatT2min.setText(_translate("Dialog", "0"))
        self.maxHeadingLabel.setText(_translate("Dialog", "maximum"))
        self.minHeadingLabel.setText(_translate("Dialog", "minimum"))
        self.valueHeadingLabel.setText(_translate("Dialog", "value"))
        self.fatT2value.setText(_translate("Dialog", "200"))
        self.muscleT2value.setText(_translate("Dialog", "35"))
        self.fatT2label.setText(_translate("Dialog", "<html><head/><body><p>Fat T<span style=\" vertical-align:sub;\">2</span> (ms)</p></body></html>"))
        self.fatT2max.setText(_translate("Dialog", "2000"))
        self.muscleT2max.setText(_translate("Dialog", "150"))
        self.opimizedHeadingLabel.setText(_translate("Dialog", "optimized"))
        self.muscleT2label.setText(_translate("Dialog", "<html><head/><body><p>Muscle T<span style=\" vertical-align:sub;\">2</span> (ms)</p></body></html>"))
        self.muscleT2min.setText(_translate("Dialog", "0"))
        self.b1scaleLabel.setText(_translate("Dialog", "B<sub>1</sub> scale"))
        self.muscleT1value.setText(_translate("Dialog", "500"))
        self.T2echoValue.setText(_translate("Dialog", "10"))
        self.muscleFractionValue.setText(_translate("Dialog", "0.7"))
        self.muscleFractionLabel.setText(_translate("Dialog", "Muscle Fraction"))
        self.label.setText(_translate("Dialog", "<html><head/><body><p>T<span style=\" vertical-align:sub;\">2</span> Echo (ms)</p></body></html>"))
        self.fatT1label.setText(_translate("Dialog", "<html><head/><body><p>Fat T<span style=\" vertical-align:sub;\">1</span> (ms)</p></body></html>"))

    def dialog_ok_clicked(self):
        """OK handler: harvest widget values; only accept on success."""
        print("dialog_ok_clicked")
        self.Dialog.setResult(1)
        worked = self.get_fitparameters()
        if worked:
            self.params.pretty_print()
            self.Dialog.accept()

    def get_fitparameters(self):
        """Read the dialog's line edits into self.params (lmfit Parameters).

        Returns True on success; False if a value failed to parse or if no
        'optimized' checkbox is ticked. The updated parameter set is stored
        back under self.lmparams['epgt2fitparams'].
        """
        print("self.optimizeFatFraction.isChecked()", self.optimizeFatFraction.isChecked())
        worked = True
        try:
            self.params.add(name='T2muscle',
                            value=float(self.muscleT2value.text()),
                            min=float(self.muscleT2min.text()),
                            max=float(self.muscleT2max.text()),
                            vary=self.optimizeMuscleT2.isChecked())
            self.params.add(name='T2fat',
                            value=float(self.fatT2value.text()),
                            min=float(self.fatT2min.text()),
                            max=float(self.fatT2max.text()),
                            vary=self.optimizeFatT2.isChecked())
            self.params.add(name='Amuscle',
                            value=float(self.muscleFractionValue.text()),
                            min=float(self.muscleFractionMin.text()),
                            max=float(self.muscleFractionMax.text()),
                            vary=self.optimizeMuscleFraction.isChecked())
            self.params.add(name='Afat',
                            value=float(self.fatFractionValue.text()),
                            min=float(self.fatFractionMin.text()),
                            max=float(self.fatFractionMax.text()),
                            vary=self.optimizeFatFraction.isChecked())
            self.params.add(name='B1scale',
                            value=float(self.b1scaleValue.text()),
                            min=float(self.b1scaleMin.text()),
                            max=float(self.b1scaleMax.text()),
                            vary=self.optimizeB1scale.isChecked())
            # T1 values and the echo spacing are always fixed.
            self.params.add(name='T1muscle', value=float(self.muscleT1value.text()), vary=False)
            self.params.add(name='T1fat', value=float(self.fatT1value.text()), vary=False)
            self.params.add(name='echo', value=float(self.T2echoValue.text()), vary=False)
            # Was misleadingly named `buttonsChecked`: each entry is True when
            # the corresponding checkbox is NOT checked.
            buttons_unchecked = [not self.optimizeFatFraction.isChecked(),
                                 not self.optimizeMuscleFraction.isChecked(),
                                 not self.optimizeMuscleT2.isChecked(),
                                 not self.optimizeFatT2.isChecked(),
                                 not self.optimizeB1scale.isChecked()]
            print(buttons_unchecked)
            if all(buttons_unchecked):
                # Nothing would be varied in the fit.
                worked = False
            self.lmparams['epgt2fitparams'] = self.params
        except Exception as exc:
            # Was a bare `except:` that silently swallowed every error,
            # including SystemExit/KeyboardInterrupt.
            print("exception occurred", exc)
            worked = False
        return worked


if __name__ == "__main__":
    import sys

    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    Dialog.setModal(False)
    lmparams = {}
    epgt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value=180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value=35, min=0, max=100, vary=True)
    epgt2fitparams.add('Afat', value=0.01, min=0, max=10, vary=True)
    epgt2fitparams.add('Amuscle', value=0.1, min=0, max=10, vary=True)
    epgt2fitparams.add('T1fat', value=365.0, vary=False)
    epgt2fitparams.add('T1muscle', value=1400, vary=False)
    epgt2fitparams.add('echo', value=10.0, vary=False)
    epgt2fitparams.add('B1scale', value=1.0, min=0, max=2, vary=True)
    lmparams['epgt2fitparams'] = epgt2fitparams
    ui = EpgT2paramsDialog(lmparams)
    ui.setupEpgT2paramsDialog(Dialog)
    rt = Dialog.open()
    print("Dialog.result() =", Dialog.result())
    sys.exit(app.exec_())
/mriplotwidget.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 14:34:43 2019

@author: neh69
"""
import numpy as np
import matplotlib
from matplotlib import pyplot as plt

from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5

if is_pyqt5():
    print("pyqt5")
    from matplotlib.backends.backend_qt5agg import (
        FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
    print("pyqt4")
    from matplotlib.backends.backend_qt4agg import (
        FigureCanvas, NavigationToolbar2QT as NavigationToolbar)

import mplcursors

# fit-parameter key -> [axis label, hover-annotation format string]
parameterNames = {'T2m':   ['T$_{2m}$ [ms]', '{}, T$_{{2m}}$ = {:.1f} [ms]'],
                  'Am100': ['A$_{m}$ [%]',   '{}, A$_{{m}}$ = {:.1f} [%]'],
                  'Af100': ['A$_{f}$ [%]',   '{}, A$_{{f}}$ = {:.1f} [%]'],
                  'B1':    ['B$_{1}$ [-]',   '{}, B$_{{1}}$ = {:.1f} [-]'],
                  'fatPC': ['fat [%]',       '{}, fat = {:.1f} [%]']}


class MRIPlotWidget(QtWidgets.QWidget):
    """Widget showing an MRI slice with ROI overlay, plus slice/echo sliders
    and a toggle to hide the background image."""

    def __init__(self, parent=None, showToolbar=True, imageData=None):
        super().__init__(parent)
        self.fig, self.ax = plt.subplots()
        self.fig.set_tight_layout(True)
        self.plot_canvas = FigureCanvas(self.fig)
        self.layout = QtWidgets.QVBoxLayout(self)
        self.axesList = []
        self.imageData = imageData
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Fixed)
        self.toggleImage = QtWidgets.QRadioButton("Hide background Image")
        self.toggleImage.toggled.connect(lambda: self.toggleImageChanged(self.toggleImage))
        self.layout.addWidget(self.toggleImage)
        self.toggleImage.setSizePolicy(sizePolicy)
        self.sliceLabel = QtWidgets.QLabel("slices")
        self.layout.addWidget(self.sliceLabel)
        self.sliceLabel.setSizePolicy(sizePolicy)
        self.slicesSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.slicesSlider.setMinimum(0)
        self.slicesSlider.setMaximum(4)
        self.slicesSlider.setValue(0)
        self.slicesSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
        self.slicesSlider.setTickInterval(1)
        self.slicesSlider.valueChanged.connect(self.valuechangedSlider)
        self.slicesSlider.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                                              QtWidgets.QSizePolicy.Fixed))
        self.layout.addWidget(self.slicesSlider)
        self.echoesLabel = QtWidgets.QLabel("echoes")
        self.echoesLabel.setSizePolicy(sizePolicy)
        self.layout.addWidget(self.echoesLabel)
        self.echoesSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.echoesSlider.setMinimum(0)
        self.echoesSlider.setMaximum(16)
        self.echoesSlider.setValue(0)
        self.echoesSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
        self.echoesSlider.setTickInterval(1)
        self.echoesSlider.valueChanged.connect(self.valuechangedSlider)
        self.echoesSlider.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                                              QtWidgets.QSizePolicy.Fixed))
        self.layout.addWidget(self.echoesSlider)
        self.layout.addWidget(self.plot_canvas)
        if showToolbar:
            self.toolbar = NavigationToolbar(self.plot_canvas, self)
            self.layout.addWidget(self.toolbar)
        self.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
                           QtWidgets.QSizePolicy.Expanding)
        self.updateGeometry()
        self.plot_canvas.mpl_connect('button_press_event', self.onclick)
        # Placeholder/splash image shown until real data is loaded;
        # assumes vision.png sits in the working directory.
        self.ax.imshow(matplotlib.image.imread('vision.png')[:, :, 0])
        self.ax.grid(False)

    def valuechangedSlider(self):
        """Slot for both sliders: refresh the slice/echo image, the ROI
        overlay and the linked histogram/bar plots."""
        slice_ = self.slicesSlider.value()
        echo = self.echoesSlider.value()
        self.imageData.currentSlice = slice_
        self.imageData.currentEcho = echo
        print("slicesSlider Value =", slice_, "echoesSlider Value =", echo)
        if isinstance(self.imageData.ImageDataT2, np.ndarray):
            print("updating image slice")
            if self.toggleImage.isChecked():
                # Background hidden: blank the slice image in place.
                self.imageData.mriSliceIMG *= 0.0
            else:
                # FIX: was `mriSiceIMG` (typo) — the rest of the codebase,
                # including toggleImageChanged below, uses `mriSliceIMG`.
                self.imageData.mriSliceIMG = self.imageData.ImageDataT2[:, :, slice_, echo].copy()
            self.imageData.overlayRoisOnImage(slice_ + 1, self.imageData.fittingParam)
            self.update_plot(self.imageData.mriSliceIMG,
                             self.imageData.maskedROIs.reshape(self.imageData.mriSliceIMG.shape))
            self.histPlotWidget.update_plot([slice_ + 1, self.imageData.T2slices, self.imageData.dixonSlices],
                                            [self.imageData.t2_data_summary_df, self.imageData.dixon_data_summary_df],
                                            self.imageData.fittingParam)
            self.barPlotWidget.update_plot([slice_ + 1, self.imageData.T2slices, self.imageData.dixonSlices],
                                           [self.imageData.t2_data_summary_df, self.imageData.dixon_data_summary_df],
                                           self.imageData.fittingParam)
        else:
            print("No images to update")

    def on_fittingParams_rbtn_toggled(self, fittingParam):
        """Radio-button slot: switch the displayed fit parameter and redraw."""
        print(fittingParam)
        self.imageData.fittingParam = fittingParam
        self.valuechangedSlider()

    def register_PlotWidgets(self, T2PlotWidget, histPlotWidget, barPlotWidget, radioButtonsWidget):
        """Wire up the sibling widgets this plot updates on interaction."""
        self.T2PlotWidget = T2PlotWidget
        self.histPlotWidget = histPlotWidget
        self.barPlotWidget = barPlotWidget
        self.radioButtonsWidget = radioButtonsWidget

    def onclick(self, event):
        """Mouse click on the image: push the clicked pixel's echo train to
        the T2 decay-curve plot."""
        xcoord = int(round(event.xdata))
        ycoord = int(round(event.ydata))
        print("MRI Plot window On Click")
        print('ycoord =', ycoord)
        print(type(self.imageData.ImageDataT2))
        if self.imageData.ImageDataT2 is not None:
            image_shape = self.imageData.ImageDataT2.shape
            print(image_shape[0], image_shape[0] - ycoord, ycoord)
            t2data = self.imageData.ImageDataT2[ycoord, xcoord, int(self.slicesSlider.value()), :]
            self.T2PlotWidget.update_plot(xcoord, ycoord, t2data)

    def update_plot(self, img, maskedROIs):
        """Redraw the slice image with the ROI overlay and attach a hover
        cursor that annotates pixels with their fitted parameter value."""
        self.ax.cla()
        self.ax.imshow(img, cmap=plt.cm.gray, interpolation='nearest')
        print("maskedROIs.shape", maskedROIs.shape)
        print("img.shape", img.shape)
        print("maskedROIs.max()", maskedROIs.max())
        if maskedROIs.max() > 0:
            self.ax.imshow(maskedROIs.reshape(img.shape), cmap=plt.cm.jet,
                           alpha=.5, interpolation='bilinear')
        mpl_cursor = mplcursors.cursor(self.plot_canvas.figure.axes, hover=True)

        @mpl_cursor.connect("add")
        def _(sel):
            # mplcursors' default annotation is "x=...\ny=...\n[z=...]";
            # parse the coordinates back out of the text.
            ann = sel.annotation
            ttt = ann.get_text()
            xc, yc, zl = [s.split('=') for s in ttt.splitlines()]
            x = round(float(xc[1]))
            y = round(float(yc[1]))
            print("x", x, "y", y)
            nrows, ncols = img.shape
            cslice = self.imageData.currentSlice
            fitParam = self.imageData.fittingParam
            # FIX: the original print emitted the literal string "ncols"
            # without its value.
            print("cslice", cslice, "nrows", nrows, "ncols", ncols)
            print("fitParam", fitParam)
            # Figure out which data set holds the selected fit parameter.
            slice_df = None
            if fitParam in self.imageData.t2_data_summary_df.columns:
                print(fitParam, "T2 dataFrame chosen")
                data_df = self.imageData.t2_data_summary_df
                slice_df = data_df[data_df.slice == cslice + 1]
            elif fitParam in self.imageData.dixon_data_summary_df.columns:
                print(fitParam, "Dixon dataFrame chosen")
                data_df = self.imageData.dixon_data_summary_df
                # Map the T2 slice number onto the corresponding Dixon slice.
                if cslice + 1 in self.imageData.T2slices:
                    dixonSliceIndex = self.imageData.dixonSlices[self.imageData.T2slices.index(cslice + 1)]
                    slice_df = data_df[data_df.slice == dixonSliceIndex]
                else:
                    slice_df = data_df[data_df.slice == cslice]
            roiList = []
            valueList = []
            if not isinstance(slice_df, type(None)):
                print("type(slice_df)", type(slice_df))
                print("slice_df.shape", slice_df.shape)
                # Pixels are stored row-major: pixel_index = y*ncols + x.
                roiList = slice_df[slice_df['pixel_index'] == y * ncols + x]['roi'].values
                valueList = slice_df[slice_df['pixel_index'] == y * ncols + x][fitParam].values
            print("roiList", roiList)
            print("valueList", valueList)
            fitParamLabel = parameterNames[fitParam][1]
            if len(roiList) > 0:
                roi = roiList[0]
                value = valueList[0]
                ann.set_text(fitParamLabel.format(roi, value))
            else:
                ann.set_text("x = {:d}\ny = {:d}".format(x, y))

        self.ax.grid(False)
        self.plot_canvas.draw()

    def toggleImageChanged(self, b1):
        """'Hide background Image' toggled: blank or restore the slice image."""
        print("Entered toggleImageChanged")
        if not isinstance(self.imageData.mriSliceIMG, type(None)):
            if self.toggleImage.isChecked():
                print("Clear background image")
                self.update_plot(np.zeros((self.imageData.mriSliceIMG.shape)),
                                 self.imageData.maskedROIs.reshape((self.imageData.mriSliceIMG.shape)))
            else:
                self.valuechangedSlider()
/simple_pandas_plot.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 10:29:38 2017

@author: neh69
"""
import os
import sys

import numpy as np
import pandas as pd
import lmfit as lm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from PyQt5 import QtCore, QtWidgets

import visionplot_widgets
import mriplotwidget
from ImageData import T2imageData


def openStudyDir():
    """Ask the user for a study directory (currently only logs the choice)."""
    dlg = QtWidgets.QFileDialog()
    returned_data = dlg.getExistingDirectory(None, "Study Directory", "")
    print("openStudyDir\n", returned_data, type(returned_data))


def openNiftiAnalyzeFile():
    """Ask the user for a nifti/analyze image file (currently only logs it)."""
    dlg = QtWidgets.QFileDialog()
    returned_data = dlg.getOpenFileName(None, "MRI data nifti/analyze", procDataDirPath,
                                        "nii files (*.nii);;analyze files (*.img);;All files (*)")
    print(returned_data)


def getH5file():
    """Choose a results CSV, load the T2/Dixon data and refresh all plots.

    Uses the module-level imageData / mainWindow / *_window globals created
    in the __main__ block below.
    """
    dlg = QtWidgets.QFileDialog()
    returned_data = dlg.getOpenFileName(None, "select results file", procDataDirPath,
                                        "CSV files (*.csv);;All files (*)")
    pathandfilename = returned_data[0]
    if len(pathandfilename) > 0:
        print(pathandfilename)
        imageData.readin_alldata_from_results_filename(os.path.abspath(pathandfilename))
        if imageData.read_T2_img_hdr_files():
            print("just before read_T2_data()")
            if imageData.read_T2_data():
                imageData.read_Dixon_data()
            print("just after read_T2_data()")
            mainWindow.setWindowTitle(imageData.T2resultsFilenameAndPath)
            # Update the image displayed in the main window.
            imageData.overlayRoisOnImage(0, imageData.fittingParam)
            mri_window.update_plot(imageData.mriSliceIMG, imageData.maskedROIs)
            print("type(imageData.ImageDataT2)", type(imageData.ImageDataT2))
            hist_window.update_plot([1, imageData.T2slices, imageData.dixonSlices],
                                    [imageData.t2_data_summary_df, imageData.dixon_data_summary_df],
                                    "T2m")
            bar_window.update_plot([1, imageData.T2slices, imageData.dixonSlices],
                                   [imageData.t2_data_summary_df, imageData.dixon_data_summary_df],
                                   "T2m")
            # Set min/max/position on the slice and echo sliders.
            mri_window.slicesSlider.setMinimum(0)
            mri_window.slicesSlider.setMaximum(imageData.numSlicesT2 - 1)
            mri_window.slicesSlider.setValue(0)
            mri_window.echoesSlider.setMinimum(0)
            mri_window.echoesSlider.setMaximum(imageData.numEchoesT2 - 1)
            # FIX: was a duplicated `slicesSlider.setValue(0)` — the echo
            # slider was never reset.
            mri_window.echoesSlider.setValue(0)
        else:
            print(imageData.t2_image_hdr_pathfilename, " not found")


# NOTE(review): the two functions below look like leftovers of a former
# QMainWindow subclass (they take `self` at module level) and are only
# referenced from commented-out menu wiring; kept for compatibility.
def fileQuit(self):
    self.close()


def closeEvent(self, ce):
    self.fileQuit()


if __name__ == "__main__":
    # Default lmfit parameter sets for the EPG and Azzabou fits.
    lmparams = {}
    epgt2fitparams = lm.Parameters()
    azzt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value=180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value=35, min=0, max=100, vary=True)
    epgt2fitparams.add('Afat', value=0.20, min=0, max=10, vary=True)
    epgt2fitparams.add('Amuscle', value=0.80, min=0, max=10, vary=True)
    epgt2fitparams.add('T1fat', value=365.0, vary=False)
    epgt2fitparams.add('T1muscle', value=1400, vary=False)
    epgt2fitparams.add('echo', value=10.0, vary=False)
    epgt2fitparams.add('B1scale', value=1.0, min=0, max=2, vary=True)
    # add_many tuples: (name, value, vary, min, max, expr)
    azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
                            ('Amuscle', 40.0, True, 0, 250, None),
                            ('T2muscle', 40.0, True, 0, 100, None),
                            ('c_l', 0.55, False, 0, 2000, None),
                            ('c_s', 0.45, False, 0, 2000, None),
                            ('t2_fl', 250.0, False, 0, 2000, None),
                            ('t2_fs', 43.0, False, 0, 2000, None),
                            ('echo', 10.0, False, 0, 2000, None))
    lmparams['epgt2fitparams'] = epgt2fitparams
    lmparams['azzt2fitparams'] = azzt2fitparams
    params = azzt2fitparams

    matplotlib.use('Qt5Agg')
    plt.style.context('seaborn-colorblind')
    sns.set(font_scale=0.6)

    # NOTE(review): hard-coded default data directory — machine specific.
    procDataDirPath = r"/home/eric/Documents/projects/programming/2019/mri_progs/T2EPGviewer/studyData/testStudy/HC-001/sess-1/upperleg/T2/results/muscle/AzzEPG"
    progname = os.path.basename(sys.argv[0])
    qApp = QtWidgets.QApplication(sys.argv)
    imageData = T2imageData()
    print("imageData.fittingParam:", imageData.fittingParam)

    mainWindow = QtWidgets.QMainWindow()
    mainWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    mainWindow.setWindowTitle("application main window")

    file_menu = QtWidgets.QMenu('&File', mainWindow)
    file_menu.addAction('&Choose Study Results File', getH5file,
                        QtCore.Qt.CTRL + QtCore.Qt.Key_H)
    mainWindow.menuBar().addMenu(file_menu)

    main_widget = QtWidgets.QWidget(mainWindow)
    mainlayout = QtWidgets.QHBoxLayout(main_widget)

    # Random placeholder data until a results file is loaded.
    npts = 256 * 100
    iii = np.random.permutation(np.arange(255 * 255))[:npts]
    ddd = np.random.randn(npts) * 100 + 500
    data_df = pd.DataFrame({'iii': iii, 'ddd': ddd})

    leftwindow = QtWidgets.QWidget()
    rightwindow = QtWidgets.QWidget()
    splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
    hlayout = QtWidgets.QHBoxLayout(leftwindow)
    vlayout = QtWidgets.QVBoxLayout(rightwindow)

    mri_window = mriplotwidget.MRIPlotWidget(imageData=imageData)
    rbtns_window = visionplot_widgets.radiobuttons_fitWidget(mri_window=mri_window)
    t2plot_window = visionplot_widgets.T2PlotWidget(lmparams, showToolbar=False)
    bar_window = visionplot_widgets.BarPlotWidget(showToolbar=False, data_df=data_df, image_size=256)
    hist_window = visionplot_widgets.HistogramPlotWidget(mri_plot=mri_window, showToolbar=True,
                                                         data_df=data_df, image_size=256)

    mainlayout.addWidget(splitHwidget)
    hlayout.addWidget(rbtns_window)
    hlayout.addWidget(mri_window)
    vlayout.addWidget(t2plot_window)
    vlayout.addWidget(bar_window)
    vlayout.addWidget(hist_window)
    splitHwidget.addWidget(leftwindow)
    splitHwidget.addWidget(rightwindow)

    # FIX: arguments were passed as (t2, bar, hist, rbtns) although the
    # signature is (T2PlotWidget, histPlotWidget, barPlotWidget,
    # radioButtonsWidget) — hist and bar were swapped.
    mri_window.register_PlotWidgets(t2plot_window, hist_window, bar_window, rbtns_window)

    main_widget.setFocus()
    mainWindow.setCentralWidget(main_widget)
    mainWindow.show()
    sys.exit(qApp.exec_())
/t2fit.py
# -*- coding: utf-8 -*-
"""T2 relaxometry fitting routines (EPG and Azzabou models).

Created on Sat Mar 3 11:30:41 2018

@author: ERIC
"""

import numpy as np
import lmfit
from epg import cpmg_epg_b1 as cpmg_epg_b1_c
from scipy import integrate

# Slice-profile tables loaded at import time from text files shipped with
# the `epg` package (rows: position + magnetization components + flip angle).
mxyz90 = np.fromfile( 'epg/mxyz90.txt', sep=' ' )
mxyz180 = np.fromfile('epg/mxyz180.txt', sep=' ')

mxyz90 = mxyz90.reshape(5,512)
mxyz180 = mxyz180.reshape(5,512)

# Sub-sample the slice profile to keep the EPG integration cheap.
offset=130
step=10
epg_slice_xxx =mxyz90[0][offset:-offset+step:step] # mm
epg_p90 = mxyz90[-1][offset:-offset+step:step] # degrees
epg_p180 = mxyz180[-1][offset:-offset+step:step] # degrees
epg_dx=epg_slice_xxx[1]-epg_slice_xxx[0]


def fit_cpmg_epg_muscle_philips_hargreaves_c( params, xxx, dx, p90_array, p180_array, yyy_exp=None):
    """Model/residual function for the two-compartment (fat + muscle) EPG fit.

    params     : lmfit.Parameters with T1fat, T1muscle, echo, T2fat, T2muscle,
                 Afat, Amuscle, B1scale.
    xxx        : echo-time axis; only its length (number of echoes) is used here.
    dx         : slice-profile sample spacing for the Simpson integration.
    p90_array, p180_array : flip angles across the slice profile (degrees).
    yyy_exp    : if given (ndarray), returns residual model - data; otherwise
                 returns the model signal (lmfit residual convention).
    """
    parvals = params.valuesdict()
    T1fat = parvals[ 'T1fat' ] # fixed
    T1muscle = parvals[ 'T1muscle' ] # fixed
    echo = parvals[ 'echo' ] # fixed
    T2fat = parvals[ 'T2fat' ] # fixed/optimized
    T2muscle = parvals['T2muscle'] # optimized
    Afat = parvals[ 'Afat'] # optimized
    Amuscle = parvals['Amuscle'] # optimized
    B1scale = parvals['B1scale']
    Nechos = len(xxx)
    Ngauss = len(p90_array)
    signal = np.zeros([Ngauss,Nechos])
    fat_signal = np.zeros(Nechos)
    muscle_signal = np.zeros(Nechos)
    for i,(p90,p180) in enumerate(zip(p90_array,p180_array)):
        # NOTE(review): cpmg_epg_b1_c appears to fill its first argument
        # in place (C extension) — confirm against the epg module.
        cpmg_epg_b1_c( fat_signal, p90, p180, T1fat, T2fat, echo, B1scale )
        cpmg_epg_b1_c( muscle_signal, p90, p180, T1muscle, T2muscle, echo, B1scale )
        signal[i] = Afat*fat_signal+Amuscle*muscle_signal
    # Integrate the per-position signals over the slice profile.
    # NOTE(review): integrate.simps is deprecated/removed in newer SciPy in
    # favour of integrate.simpson — pin SciPy or migrate.
    int_signal = integrate.simps(signal, dx=dx,axis=0)
    if isinstance(yyy_exp, np.ndarray):
        return( int_signal-yyy_exp)
    else:
        return(int_signal)


def calculate_T2values_on_slice_muscleEPG(lmparams, yyy_exp):
    """Fit one pixel's echo train with the EPG muscle/fat model.

    lmparams : dict holding the 'epgt2fitparams' lmfit.Parameters.
    yyy_exp  : 1-D array of echo amplitudes for the pixel.
    Returns (fit_plot, yyy_exp_norm, results, xxx): the fitted curve
    (zeros if the fit failed), the normalised data, the lmfit result and
    the echo-time axis.
    """
#    params = lmfit.Parameters()
#    params.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
#    params.add('T2muscle', value = 35, min=0, max=100, vary=True )
#    params.add('Afat', value = 0.01, min=0, max=10, vary=True )
#    params.add('Amuscle', value = 0.1, min=0, max=10, vary=True )
#    params.add('T1fat', value = 365.0, vary=False)
#    params.add('T1muscle', value = 1400, vary=False)
#    params.add('echo', value = 10.0, vary=False)
    params = lmparams['epgt2fitparams']
    echo_time = params['echo'].value
    num_echoes = yyy_exp.size
    parvals = params.valuesdict()
    print("parvals")
    for k,v in parvals.items():
        print(k,v)
    print("EPG echo time =", echo_time)
    # Echo-time axis: echo_time, 2*echo_time, ..., num_echoes*echo_time.
    xxx = np.linspace( echo_time, echo_time*num_echoes, num_echoes)
    dx = xxx[1]-xxx[0]
    # Normalise to the peak amplitude; guard against an all-zero pixel.
    yyy_exp_max =yyy_exp.max()
    if yyy_exp_max == 0:
        yyy_exp_max = 1.0
    yyy_exp_norm = yyy_exp/yyy_exp_max
    fitModel = lmfit.Minimizer(fit_cpmg_epg_muscle_philips_hargreaves_c,
                               lmparams['epgt2fitparams'],
                               fcn_args=( xxx, dx, epg_p90, epg_p180, yyy_exp_norm))
    results = fitModel.minimize()
    fit_plot = np.zeros(num_echoes)
    if results.success:
        # Reconstruct the fitted curve from the residual: model = residual + data.
        fit_plot = results.residual + yyy_exp_norm
    return( fit_plot, yyy_exp_norm, results, xxx)


def calculate_T2values_on_slice_muscleAzz(lmparams, yyy_exp):
    """Fit one pixel's echo train with the Azzabou tri-exponential model.

    The first two echoes are excluded from the fit (indexing [2:]),
    presumably to avoid stimulated-echo contamination — TODO confirm.
    Returns (fit_plot, yyy_exp_norm, results, xxx); fit_plot covers only
    the fitted echoes (length num_echoes-2).
    """
    params = lmparams['azzt2fitparams']
    echo_time = params['echo'].value
    num_echoes = yyy_exp.size
    # Bi-exponential fat (long/short T2) plus mono-exponential muscle decay.
    model = lmfit.models.ExpressionModel('Afat * (c_l*exp(-x/t2_fl)+c_s*exp(-x/t2_fs)) + Amuscle * (exp(-x/T2muscle))')
    parvals = params.valuesdict()
    print("parvals")
    for k,v in parvals.items():
        print(k,v)
    print("azzabou echo time", echo_time)
#    saved_output = {'T2muscle_value': [],
#                    'T2muscle_stderr': [],
#                    'Amuscle_value': [],
#                    'Amuscle_stderr': [],
#                    'Afat_value': [],
#                    'Afat_stderr': [],
#                    'chisqr': [],
#                    'redchi':[],
#                    'AIC':[],
#                    'BIC':[],
#                    'slice':[],
#                    'pixel_index':[],
#                    }
    xxx = np.linspace( echo_time, echo_time*num_echoes, num_echoes)
    yyy_exp_max = yyy_exp.max()
    fit_plot = np.zeros(num_echoes-2)
    # Guard against an all-zero pixel before normalising.
    if yyy_exp_max == 0.0:
        yyy_exp_max = 1.0
    yyy_exp_norm = yyy_exp/yyy_exp_max
    print("fitting data")
    results = model.fit(yyy_exp_norm[2:] , x=xxx[2:], params=lmparams['azzt2fitparams'])
    #mi.plot()
    #saved_output['name'].append('t2_m')
#    saved_output['T2muscle_value'].append(results.params['T2muscle'].value)
#    saved_output['T2muscle_stderr'].append(results.params['T2muscle'].stderr)
#    saved_output['chisqr'].append(results.chisqr)
#    saved_output['redchi'].append(results.redchi)
#    saved_output['AIC'].append(results.aic)
#    saved_output['BIC'].append(results.bic)
#
#
#    saved_output['Amuscle_value'].append(results.params['Amuscle'].value)
#    saved_output['Amuscle_stderr'].append(results.params['Amuscle'].stderr)
#    saved_output['Afat_value'].append(results.params['Afat'].value)
#    saved_output['Afat_stderr'].append(results.params['Afat'].stderr)
    # Reconstruct the fitted curve from the residual: model = residual + data.
    fit_plot = results.residual + yyy_exp_norm[2:]
    return( fit_plot, yyy_exp_norm, results, xxx)
/visionplot_widgets.py
# -*- coding: utf-8 -*-
"""
Qt widgets for browsing muscle MRI T2/Dixon fit results: matplotlib plot
panels (histogram, bar, T2-decay), radio-button control groups, and the
main application window wiring them together.

Created on Wed Feb 28 13:11:07 2018

@author: neh69
"""
import sys
import numpy as np
#import matplotlib
import pandas as pd
#import mplcursors
from uncertainties import ufloat
import t2fit
import lmfit as lm
from matplotlib import pyplot as plt
#import seaborn as sns
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
import seaborn as sns

# Select the matplotlib Qt backend matching the installed Qt bindings.
if is_pyqt5():
    print("pyqt5")
    from matplotlib.backends.backend_qt5agg import (
        FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
    print("pyqt4")
    from matplotlib.backends.backend_qt4agg import (
        FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from ImageData import T2imageData
import epgT2paramsDialog
import azzT2paramsDialog

#mxyz90 = np.fromfile( 'epg\mxyz90.txt', sep=' ' )
#mxyz180 = np.fromfile('epg\mxyz180.txt', sep=' ')
#
#mxyz90 = mxyz90.reshape(5,512)
#mxyz180 = mxyz180.reshape(5,512)
#
#offset=130
#step=10
#epg_slice_xxx =mxyz90[0][offset:-offset+step:step] # mm
#epg_p90 = mxyz90[-1][offset:-offset+step:step] # degrees
#epg_p180 = mxyz180[-1][offset:-offset+step:step] # degrees
#epg_dx=epg_slice_xxx[1]-epg_slice_xxx[0]


class PlotWidget(QtWidgets.QWidget):
    """A QWidget embedding one matplotlib figure/axes, optionally with the
    standard navigation toolbar underneath."""

    def __init__(self, parent=None, showToolbar=True):
        super(PlotWidget,self).__init__(parent)
        fig =Figure(figsize=(3, 5))
        fig.set_tight_layout(True)
        self.plot_canvas = FigureCanvas(fig)
        self.ax = fig.add_subplot(111)
        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.addWidget(self.plot_canvas)
        if showToolbar:
            self.toolbar = NavigationToolbar(self.plot_canvas, self)
            self.layout.addWidget(self.toolbar)

    def return_ax(self):
        """Return the single matplotlib Axes of this widget."""
        return(self.ax)


class HistogramPlotWidget(PlotWidget):
    """Plot widget showing per-ROI distributions (seaborn KDE) of a fitted
    parameter for the currently displayed slice, with an Update button."""

    def __init__(self, parent=None, showToolbar=False, mri_plot=None, data_df=None, image_size=256):
        self.data_df = data_df
        self.image_size = image_size
        super(HistogramPlotWidget,self).__init__(parent=parent, showToolbar=showToolbar)
        self.buttonUpdate = QtWidgets.QPushButton('Update')
        self.buttonUpdate.clicked.connect(self.update)
        self.layout.addWidget(self.buttonUpdate)

    def update(self):
        # Currently only reports the axes x-limits; body looks unfinished.
        print((self.ax.get_xlim()))
        xmin,xmax = self.ax.get_xlim()

    def update_plot(self, slice_info,data_dframes, plot_param):
        """Redraw the histogram panel.

        slice_info: (displayed slice, list of T2 slices, list of Dixon
        slices).  data_dframes: (T2 results df, Dixon results df); whichever
        contains `plot_param` as a column is used.  For Dixon data the
        displayed T2 slice index is mapped to the matching Dixon slice.
        Returns True when something was plotted, False otherwise.
        """
        self.ax.cla()
        self.plot_canvas.draw()
        print("Entered HistogramPlotWidget.update_image, plot_param =", plot_param)
        data_df=None
        slice_displayed = slice_info[0]
        T2_slices = slice_info[1]
        dixon_slices = slice_info[2]
        print("data_dframes[0]", type(data_dframes[0]), data_dframes[0].columns)
        print("data_dframes[1]", type(data_dframes[1]), data_dframes[1].columns)
        if isinstance(data_dframes[0],pd.core.frame.DataFrame):
            if plot_param in data_dframes[0].columns:
                print("plot_param {} found in dataframe is T2".format(plot_param))
                data_df = data_dframes[0]
                data_df=data_df[data_df["slice"]==slice_displayed]
            elif isinstance(data_dframes[1],pd.core.frame.DataFrame):
                print("plot_param {} found in dataframe is Dixon".format(plot_param))
                print("data_dframes[1].columns",data_dframes[1].columns)
                if plot_param in data_dframes[1].columns:
                    print("plot_param in data_dframes[1]:", plot_param)
                    data_df = data_dframes[1]
                    # Map the displayed T2 slice to its Dixon counterpart.
                    if slice_displayed in T2_slices:
                        slice_displayed = dixon_slices[T2_slices.index(slice_displayed)]
                    data_df=data_df[data_df["slice"]==slice_displayed]
                else:
                    print( "HIST", plot_param, " not found")
                    return False
        else:
            print("HIST", isinstance(data_dframes[1],pd.core.frame.DataFrame))
            return False
        print("HIST data_df.shape[0]",data_df.shape[0])
        if data_df.shape[0] == 0 or type(data_df) == type(None):
            print("HIST return because df shape[0] = 0 or type of data_df = type None")
            return False
#        self.ax2.cla()
        if isinstance(data_df, pd.core.frame.DataFrame):
            print("Plotting HIST Plot" )
            data_df = data_df.sort_values(by=['roi'])
            #plot_param = "T2value"
            # One KDE curve per ROI for the displayed slice.
            for roi in data_df.roi.unique():
                print(roi)
                query_str = '(slice == {}) and (roi == "{}")'.format(slice_displayed, roi)
                sns.distplot(data_df.query(query_str)[plot_param], hist=False, label=roi, ax=self.ax)
#                self.ax.hist( data_df.query(query_str)[plot_param], bins=100, label=roi, alpha=0.7);
                self.ax.legend()
            # Axis label depends on which fitted parameter is shown.
            if plot_param == "T2m":
                self.ax.set_xlabel("$T_2$ [ms]")
            elif plot_param == "Am100":
                self.ax.set_xlabel("$A_m$ [%]")
            elif plot_param == "Af100":
                self.ax.set_xlabel("$A_f$ [%]")
            elif plot_param == "B1":
                self.ax.set_xlabel("$B_1$")
            elif plot_param == "fatPC":
                self.ax.set_xlabel("ff [%]")
            self.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
            self.plot_canvas.draw()
        return True


class BarPlotWidget(PlotWidget):
    """Plot widget showing a per-slice, per-ROI bar chart (seaborn catplot)
    of a fitted parameter."""

    def __init__(self, parent=None, showToolbar=True, data_df=None, image_size=256):
        self.data_df = data_df
        self.image_size = image_size
        super(BarPlotWidget,self).__init__(parent=parent, showToolbar=showToolbar)
#        self.buttonUpdate = QtWidgets.QPushButton('Update')
#        self.buttonUpdate.clicked.connect(self.update)
#        self.layout.addWidget(self.buttonUpdate)

    def update(self):
        # Currently only reports the axes x-limits; body looks unfinished.
        print((self.ax.get_xlim()))
        xmin,xmax = self.ax.get_xlim()

    def update_plot(self, slice_info,data_dframes, plot_param):
        """Redraw the bar panel; same dataframe selection logic as
        HistogramPlotWidget.update_plot.  Returns True when plotted."""
        self.ax.cla()
        self.plot_canvas.draw()
        print("Entered BarPlotWidget.update_image, plot_param =", plot_param)
        #print(data_.columns)
        slice_displayed = slice_info[0]
        T2_slices = slice_info[1]
        dixon_slices = slice_info[2]
        data_df=None
        print("data_dframes[0]", type(data_dframes[0]), data_dframes[0].columns)
        print("data_dframes[1]", type(data_dframes[1]), data_dframes[1].columns)
        if isinstance(data_dframes[0],pd.core.frame.DataFrame):
            if plot_param in data_dframes[0].columns:
                print("plot_param {} found in dataframe is T2".format(plot_param))
                data_df = data_dframes[0]
                data_df=data_df[data_df["slice"]==slice_displayed]
            elif isinstance(data_dframes[1],pd.core.frame.DataFrame):
                print("plot_param {} found in dataframe is Dixon".format(plot_param))
                print("data_dframes[1].columns",data_dframes[1].columns)
                if plot_param in data_dframes[1].columns:
                    print("plot_param in data_dframes[1]:", plot_param)
                    data_df = data_dframes[1]
                    # Map the displayed T2 slice to its Dixon counterpart.
                    if slice_displayed in T2_slices:
                        slice_displayed = dixon_slices[T2_slices.index(slice_displayed)]
#                    else:
#                        dixon_slice = slice_displayed
#                        slice_displayed = dixon_slices[T2_slices.index(slice_displayed)]
                    data_df=data_df[data_df["slice"]==slice_displayed]
                else:
                    print( plot_param, " not found")
                    return(False)
        else:
            print(isinstance(data_dframes[1],pd.core.frame.DataFrame))
            return(False)
        print("HIST data_df.shape[0]", data_df.shape[0])
        if data_df.shape[0] == 0 or type(data_df) == type(None):
            print("return because df shape[0] = 0 or type of data_df = type None")
            return False
        data_df = data_df.sort_values(by=['roi'])
        if isinstance(data_df, pd.core.frame.DataFrame):
            print("Plotting BAR Plot" )
            #plot_param = "T2value"
#            for roi in data_df.roi.unique():
#                print(roi)
#                query_str = '(slice == {}) and (roi == "{}")'.format(slice_displayed, roi)
#                self.ax.hist( data_df.query(query_str)[plot_param], bins=100, label=roi, alpha=0.4);
#                self.ax.legend()
#            numRois = data_df.roi.unique().shape[0]
            # NOTE(review): sns.catplot creates its own Figure; passing ax=
            # here is unusual — confirm the bars actually land on self.ax.
            sns.catplot( kind='bar', x='slice', y=plot_param, data=data_df, hue='roi', ci="sd", ax=self.return_ax() );
            self.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
            # Axis label depends on which fitted parameter is shown.
            if plot_param == "T2m":
                self.ax.set_ylabel("$T_2$ [ms]")
            elif plot_param == "Am100":
                self.ax.set_ylabel("$A_m$ [%]")
            elif plot_param == "Af100":
                self.ax.set_ylabel("$A_f$ [%]")
            elif plot_param == "B1":
                self.ax.set_ylabel("$B_1$")
            elif plot_param == "fatPC":
                self.ax.set_ylabel("ff [%]")
            self.ax.set_xlabel("slices")
#            plt.tight_layout()
            self.plot_canvas.draw()
        return True


class T2PlotWidget(PlotWidget):
    """Semilog plot of a pixel's echo-train decay together with the fitted
    curve (EPG or 'normal' Azzabou fit, selected via radio buttons)."""

    def __init__( self, lmparams, parent=None, showToolbar=True):
        super(T2PlotWidget, self).__init__(parent, showToolbar=showToolbar)
        self.plot_T2_startup()
        self.lmparams = lmparams
        self.T2epgnorm_btns = radiobuttons_EPGWidget(self.lmparams, self)
        self.layout.addWidget(self.T2epgnorm_btns)

    def plot_T2_startup(self):
        """Draw a synthetic bi-exponential decay as placeholder content."""
        ttt = np.linspace(0,170, 17)
        yyy = 80*np.exp(-ttt/35.0)+20*np.exp(-ttt/120.0)
        yyy1 = yyy+np.random.randn(len(yyy))
        self.ax.semilogy(ttt, yyy1, 'o')
        self.ax.semilogy(ttt, yyy, '-')
        self.ax.set_xlabel('Time [ms]')
        self.ax.set_ylabel('Signal')
        self.ax.set_ylim(1,110)

    def update_plot(self, xcoord, ycoord, t2data):
        """Fit `t2data` with the selected model and redraw data + fit with a
        legend summarising fitted values (with uncertainties)."""
        print("update_T2PlotImag called")
        #self.ttt = np.linspace(0,170, 17)
        self.ax.cla() # clear the plot area
        if self.T2epgnorm_btns.epg_rbtn.isChecked():
            print("Run EPG Fit")
            print('echo value', self.lmparams['epgt2fitparams']['echo'])
#            params = lm.Parameters()
#            params.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
#            params.add('T2muscle', value = 35, min=0, max=100, vary=True )
#            params.add('Afat', value = 0.01, min=0, max=10, vary=True )
#            params.add('Amuscle', value = 0.1, min=0, max=10, vary=True )
#            params.add('T1fat', value = 365.0, vary=False)
#            params.add('T1muscle', value = 1400, vary=False)
#            params.add('echo', value = 10.0, vary=False)
            #xxx = np.linspace(10,10*len(t2data), len(t2data))
#            self.params.pretty_print()
            #fit_values, fit_curve, fit_data, lmresults = t2fit.calculate_T2values_on_slice_muscleEPG(self.lmparams, t2data, len(t2data), xxx, epg_dx, epg_p90, epg_p180)
            fit_curve, fit_data, lmresults, xxx = t2fit.calculate_T2values_on_slice_muscleEPG(self.lmparams, t2data)
        else:
            print("Run Normal T2 Fit")
            fit_curve, fit_data, lmresults, xxx = t2fit.calculate_T2values_on_slice_muscleAzz(self.lmparams,t2data)
        print(dir(lmresults))
        print(lmresults.success)
        if not lmresults.success:
            return
        #
        # Create uncertainty floats of varied params
        #
        ufs = {}
        for vname in lmresults.var_names:
            v = lmresults.params[vname].value
            e = lmresults.params[vname].stderr
            ufs[vname] = ufloat( v,e)
        # Convert amplitudes to percentages of the total signal.
        if ('Amuscle' in ufs.keys()) and ('Afat' in ufs.keys()):
            ufs['Amuscle'] = 100.0*ufs['Amuscle']/(ufs['Amuscle']+ufs['Afat'])
            ufs['Afat'] = 100.0-ufs['Amuscle']
        # Build the legend text from whichever parameters were varied.
        t2m_str = ""
        t2f_str = ""
        Am_str = ""
        Af_str = ""
        B1_str = ""
        for name, value in ufs.items():
            print(name)
            if name == 'T2muscle':
                t2m_str = "$T_{{2m}}$ = ${:5.2fL}$ ms\n".format(value)
            elif name == 'T2fat':
                t2f_str = "$T_{{2f}}$ = ${:5.2fL}$ ms\n".format(value)
            elif name == 'Amuscle':
                Am_str = "$A_m$ = ${:5.2fL}$\n".format(value)
            elif name == 'Afat':
                Af_str = "$A_f$ = ${:5.2fL}$\n".format(value)
            elif name == 'B1scale':
                B1_str = "$B_1$ scale = ${:5.2fL}$\n".format(value)
        results_legend = "{}{}{}{}{}".format(t2m_str, t2f_str, Am_str, Af_str, B1_str)
        # The Azzabou fit excludes the first two echoes, so its curve is
        # plotted against xxx[2:] while the data uses the full axis.
        if self.T2epgnorm_btns.epg_rbtn.isChecked():
            self.ax.semilogy(xxx, 100*fit_data, 'o')
            self.ax.semilogy(xxx, 100*fit_curve, '-', label=results_legend)
        else:
            self.ax.semilogy(xxx[2:], 100*fit_curve, '-', label=results_legend)
            self.ax.semilogy(xxx, 100*fit_data, 'o')
        self.ax.legend( fontsize=8)
        #self.ax.set_ylim(1,110)
        self.ax.set_xlabel('Time [ms]')
        self.ax.set_ylabel('Signal')
        self.ax.set_ylim(0.5,150)
        self.plot_canvas.draw()


class radiobuttons_EPGWidget(QtWidgets.QWidget):
    """Radio buttons choosing between EPG and 'normal' T2 fitting, plus a
    button opening the matching parameter dialog."""

    def __init__(self, lmparams, parent=None):
        self.lmparams = lmparams
        # Pre-build both parameter dialogs; show() is called on demand.
        self.epgDialog = QtWidgets.QDialog()
        self.epgT2params_widget = epgT2paramsDialog.EpgT2paramsDialog(self.lmparams)
        self.epgT2params_widget.setupEpgT2paramsDialog(self.epgDialog)
        self.azzDialog = QtWidgets.QDialog()
        self.azzT2params_widget = azzT2paramsDialog.AzzT2paramsDialog(self.lmparams)
        self.azzT2params_widget.setupAzzT2paramsDialog(self.azzDialog)
        super(radiobuttons_EPGWidget, self).__init__(parent)
        hlayout = QtWidgets.QHBoxLayout(self)
        group_rbtns = QtWidgets.QButtonGroup()
        group_rbtns.exclusive()
        self.epg_rbtn = QtWidgets.QRadioButton("EPG T2")
        self.norm_rbtn = QtWidgets.QRadioButton("normal T2")
        self.norm_rbtn.setChecked(True);
        self.T2params_btn = QtWidgets.QPushButton("T2 Parameters")
        # Tag each button with an identifier read back in btnstate().
        self.epg_rbtn.fittingParam = "epg"
        self.norm_rbtn.fittingParam= 'norm'
        self.epg_rbtn.toggled.connect(lambda:self.btnstate(self.epg_rbtn))
        self.norm_rbtn.toggled.connect(lambda:self.btnstate(self.norm_rbtn))
        self.T2params_btn.clicked.connect(self.T2params_btn_clicked)
        group_rbtns.addButton(self.epg_rbtn)
        group_rbtns.addButton(self.norm_rbtn)
        hlayout.addWidget(self.norm_rbtn)
        hlayout.addWidget(self.epg_rbtn)
        hlayout.addStretch(1)
        hlayout.addWidget(self.T2params_btn)

    def T2params_btn_clicked(self):
        """Show the parameter dialog matching the selected fit type."""
        print("T2params_btn_clicked")
        if self.epg_rbtn.isChecked():
            rt = self.epgDialog.show()
        else:
            rt = self.azzDialog.show()
        print("rt =", rt)

    def btnstate(self,b):
        # Debug trace of which fit type was selected.
        if b.isChecked():
            print(b.text())
            print(b.fittingParam)
        #self.mri_window.on_fittingParams_rbtn_toggled( str(b.fittingParam))


class radiobuttons_fitWidget(QtWidgets.QWidget):
    """Radio buttons selecting which fitted parameter map (T2/Am/Af/B1/
    Dixon fat) the MRI window displays; forwards the choice to mri_window."""

    def __init__(self, parent=None, mri_window=None):
        super(radiobuttons_fitWidget, self).__init__(parent)
        self.mri_window = mri_window
        vbox1_radiobuttons = QtWidgets.QVBoxLayout(self)
        group_fittingParams_rbtns = QtWidgets.QButtonGroup()
        group_fittingParams_rbtns.exclusive()
        self.T2_rbtn = QtWidgets.QRadioButton("T2")
        self.Am_rbtn = QtWidgets.QRadioButton("Am")
        self.Af_rbtn = QtWidgets.QRadioButton("Af")
        self.B1_rbtn = QtWidgets.QRadioButton("B1")
        self.Dixon_rbtn = QtWidgets.QRadioButton("Dixon Fat [%]")
        self.T2_rbtn.setChecked(True)
        # Tag each button with the dataframe column it selects.
        self.T2_rbtn.fittingParam = "T2m"
        self.Am_rbtn.fittingParam = "Am100"
        self.Af_rbtn.fittingParam = "Af100"
        self.B1_rbtn.fittingParam = "B1"
        self.Dixon_rbtn.fittingParam = "fatPC"
        self.T2_rbtn.toggled.connect(lambda:self.btnstate(self.T2_rbtn))
        self.Am_rbtn.toggled.connect(lambda:self.btnstate(self.Am_rbtn))
        self.Af_rbtn.toggled.connect(lambda:self.btnstate(self.Af_rbtn))
        self.B1_rbtn.toggled.connect(lambda:self.btnstate(self.B1_rbtn))
        self.Dixon_rbtn.toggled.connect(lambda:self.btnstate(self.Dixon_rbtn))
        group_fittingParams_rbtns.addButton(self.T2_rbtn)
        group_fittingParams_rbtns.addButton(self.Am_rbtn)
        group_fittingParams_rbtns.addButton(self.Af_rbtn)
        group_fittingParams_rbtns.addButton(self.B1_rbtn)
        group_fittingParams_rbtns.addButton(self.Dixon_rbtn)
        vbox1_radiobuttons.addWidget(self.T2_rbtn)
        vbox1_radiobuttons.addWidget(self.Am_rbtn)
        vbox1_radiobuttons.addWidget(self.Af_rbtn)
        vbox1_radiobuttons.addWidget(self.B1_rbtn)
        vbox1_radiobuttons.addWidget(self.Dixon_rbtn)
        vbox1_radiobuttons.addStretch(1)

    def btnstate(self,b):
        # Notify the MRI window of the newly selected parameter map.
        if b.isChecked():
            print(b.text())
            print(b.fittingParam)
            self.mri_window.on_fittingParams_rbtn_toggled( str(b.fittingParam))


class ApplicationWindow(QtWidgets.QMainWindow):
    """Main window: MRI image + parameter radio buttons on the left, the
    T2-decay, image and histogram panels on the right, in a splitter.

    NOTE(review): MRIPlotWidget is referenced but not defined or imported in
    this file — presumably defined elsewhere in the project; verify.
    """

    def __init__(self, params):
        self.params = params
        imageData = T2imageData()
        print("imageData.fittingParam:",imageData.fittingParam)
        # Random demo data used to populate the plots at startup.
        npts = 256*100
        iii = np.random.permutation(np.arange(255*255))[:npts]
        ddd = np.random.randn(npts)*100+500
        data_df = pd.DataFrame({'iii': iii, 'ddd':ddd})
        super(ApplicationWindow, self).__init__()
        leftwindow = QtWidgets.QWidget()
        rightwindow = QtWidgets.QWidget()
        splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        #hlayout = QtWidgets.QHBoxLayout(self._main)
        hlayout = QtWidgets.QHBoxLayout(leftwindow)
        vlayout = QtWidgets.QVBoxLayout(rightwindow)
        mriplot_window = MRIPlotWidget(imageData=imageData)
        rbtns_window = radiobuttons_fitWidget(mri_window=mriplot_window)
        t2plot_window = T2PlotWidget( self.params, showToolbar=False)
        h1_window = PlotWidget( showToolbar=False)
        h2_window = HistogramPlotWidget(showToolbar=True)
        #hlayout.addWidget(mriplot_window)
        mriplot_window.register_PlotWidgets(t2plot_window, h1_window, h2_window)
        #vbox1_radiobuttons = QtWidgets.QVBoxLayout()
#        hbox.addLayout(vbox1_radiobuttons)
#        hbox.addLayout(vbox1_image)
#        hbox.addLayout(vbox2_image)
        hlayout.addWidget(rbtns_window)
        hlayout.addWidget(mriplot_window)
        vlayout.addWidget(t2plot_window)
        vlayout.addWidget(h1_window)
        vlayout.addWidget(h2_window)

        def func3(x, y):
            # Demo surface used only for the startup image overlay below.
            return (1 - x / 2 + x**5 + y**3) * np.exp(-(x**2 + y**2))

        # make these smaller to increase the resolution
        dx, dy = 0.05, 0.05
        x = np.arange(-3.0, 3.0, dx)
        y = np.arange(-3.0, 3.0, dy)
        X, Y = np.meshgrid(x, y)
        # when layering multiple images, the images need to have the same
        # extent. This does not mean they need to have the same shape, but
        # they both need to render to the same coordinate system determined by
        # xmin, xmax, ymin, ymax. Note if you use different interpolations
        # for the images their apparent extent could be different due to
        # interpolation edge effects
        extent = np.min(x), np.max(x), np.min(y), np.max(y)
        Z1 = np.add.outer(range(8), range(8)) % 2 # chessboard
        mriplot_window.return_ax().imshow(Z1, cmap=plt.cm.gray, interpolation='nearest', extent=extent)
        Z2 = func3(X, Y)
        mriplot_window.return_ax().imshow(Z2, cmap=plt.cm.viridis, alpha=.9, interpolation='bilinear', extent=extent)
        splitHwidget.addWidget(leftwindow)
        splitHwidget.addWidget(rightwindow )
        print(data_df.head())
        # Scatter the demo values into a 255x255 image for the h1 panel.
        plot_image = np.zeros(255*255)
        plot_image[data_df['iii']] = data_df['ddd']
        h1_window.return_ax().imshow( plot_image.reshape((255,255)))
        h1_window.return_ax().set_xlabel('x')
        h1_window.return_ax().set_ylabel('y')
        h2_window.return_ax().hist(ddd, bins=100)
        h2_window.return_ax().set_xlabel('x')
        h2_window.return_ax().set_ylabel('y')
        self.setCentralWidget(splitHwidget)

    def zoom(self):
        # NOTE(review): self.histtoolbar is never assigned in this file.
        self.histtoolbar.zoom()

    def ax_changed(self,ax):
        """Return True when `ax` limits still equal the values cached in
        self.lim_dict (NOTE(review): lim_dict is never assigned here)."""
        old_xlim, old_ylim = self.lim_dict[ax]
        print("old xlim", old_xlim, "ylim", old_ylim)
        print("new xlim", ax.get_xlim(), "ylim", ax.get_ylim())
        return np.all(old_xlim == ax.get_xlim()) and np.all(old_ylim == ax.get_ylim())

    def onrelease(self,event):
        """Mouse-release handler: report which axes' limits changed."""
        print("Active Toolbar button:",self.histtoolbar._active )
        print("plot release")
        print(event)
        self.static_canvas.flush_events()
        changed_axes = [ax for ax in self.static_canvas.figure.axes if self.ax_changed(ax)]
        not_changed_axes = [ax for ax in self.static_canvas.figure.axes if not self.ax_changed(ax)]
        print("changed_axes",changed_axes)
        print("not_changed_axes",not_changed_axes)
        for ax in changed_axes:
            print("Changed xlim", ax.get_xlim(), "ylim", ax.get_ylim())


if __name__ == "__main__":
    # Default EPG fitting parameters; only T2muscle/Afat/Amuscle vary.
    epgt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True )
    epgt2fitparams.add('Afat', value = 0.2, min=0, max=10, vary=True )
    epgt2fitparams.add('Amuscle', value = 0.8, min=0, max=10, vary=True )
    epgt2fitparams.add('T1fat', value = 365.0, vary=False)
    epgt2fitparams.add('T1muscle', value = 1400, vary=False)
    epgt2fitparams.add('echo', value = 10.0, vary=False)
    qapp = QtWidgets.QApplication(sys.argv)
    app = ApplicationWindow(epgt2fitparams)
    app.show()
    qapp.exec_()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
DiegoArcelli/BlocksWorld
refs/heads/main
{"/launch.py": ["/load_state.py", "/utils.py", "/blocks_world.py"], "/main.py": ["/load_state.py", "/utils.py", "/blocks_world.py"], "/search_algs.py": ["/blocks_world.py"]}
└── ├── blocks_world.py ├── cnn.py ├── launch.py ├── load_state.py ├── main.py ├── search_algs.py └── utils.py
/blocks_world.py
from aima3.search import *
from utils import *
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

# Implementation of the Blocks World problem based on the AIMA framework.
# State representation: a tuple of (block_number, row, column) triples
# followed by a final integer giving the table width (number of columns).


class BlocksWorld(Problem):
    def __init__(self, initial, goal):
        """Create a Blocks World problem with the given initial/goal states."""
        super().__init__(initial, goal)

    def get_blocks_number(self):
        """Return the number of elements in the initial state tuple.

        NOTE(review): this counts the trailing table-size entry too, so it is
        one more than the number of blocks — confirm callers expect that.
        """
        return len(self.initial)

    def actions(self, state):
        """Return the list of legal moves in `state`.

        A move is a new (block, row, column) placement of any block that is
        currently on top of its column, onto the top of any other column.
        """
        blocks = [*state[0:-1]]
        size = state[-1]
        # Find the topmost block of every occupied column.
        columns = {}
        tops = []
        for block in blocks:
            n, i, j = block
            if j not in columns:
                columns[j] = (n, i, j)
            else:
                if i > columns[j][1]:
                    columns[j] = (n, i, j)
        for col in columns:
            tops.append(columns[col])
        # Each top block may move to the top of every other column
        # (row = top of that column + 1, or row 0 if the column is empty).
        actions = []
        for block in tops:
            n, i, j = block
            for col in range(size):
                if col != j:
                    if col in columns:
                        actions.append((n, columns[col][1]+1, col))
                    else:
                        actions.append((n, 0, col))
        return actions

    def result(self, state, actions):
        """Return the state obtained by applying one action to `state`.

        `actions` is a single (block, row, column) placement: the block is
        removed from its old position and re-added at the new one.
        """
        blocks = [*state[0:-1]]
        size = state[-1]
        to_delete = ()
        for block in blocks:
            if block[0] == actions[0]:
                to_delete = block
        blocks.remove(to_delete)
        blocks.append((actions))
        blocks.append(size)
        return tuple(blocks)

    def goal_test(self, state):
        """Return True when `state` matches the goal state.

        Blocks are compared sorted by block number so that tuple order
        within the state does not matter.
        """
        op_1 = [*state[0:-1]]
        op_2 = [*self.goal[0:-1]]
        op_1.sort(key=lambda l: l[0])
        op_2.sort(key=lambda l: l[0])
        return str(op_1) == str(op_2)

    def get_movable(self, state):
        """Return the blocks that can be moved in `state` (column tops)."""
        blocks = [*state[0:-1]]
        size = state[-1]
        columns = {}
        tops = []
        for block in blocks:
            n, i, j = block
            if j not in columns:
                columns[j] = (n, i, j)
            else:
                if i > columns[j][1]:
                    columns[j] = (n, i, j)
        for col in columns:
            tops.append(columns[col])
        return tops

    def misplaced_blocks(self, node):
        """Heuristic: number of blocks not at their goal (row, column)."""
        blocks = [*node.state[0:-1]]
        target = [*self.goal[0:-1]]
        target.sort(key=lambda l: l[0])
        value = 0
        for block in blocks:
            n, i, j = block
            # target is sorted by block number, so target[n-1] is block n.
            if target[n-1][1:3] != (i, j):
                value += 1
#            if block not in self.get_movable(node.state):
#                value += 1
        return value

    def depth(self, node):
        """Return the depth of a node in the search tree (used as UCS cost)."""
        return node.depth

    def solution(self, actions, output=True):
        """Print (and optionally draw) the action sequence from the initial
        state to the goal state.

        `actions` is the list returned by a search algorithm's .solution();
        None means no solution was found.
        """
        # BUG FIX: the original guard was `if len(actions) is None:` which is
        # always False (len returns an int) and raises TypeError when
        # `actions` is None.  Bail out explicitly on None instead.
        if actions is None:
            return
        state = self.initial
        successor = None
        n = 1
        print("Lunghezza soluzione: " + str(len(actions)))
        for action in actions:
            print(action)
            successor = self.result(state, action)
            if output:
                # Show the state before and after each move side by side.
                figue_1 = self.draw_state(state)
                figue_2 = self.draw_state(successor)
                _, axarr = plt.subplots(1, 2)
                axarr[0].imshow(figue_1, cmap=plt.cm.binary)
                axarr[0].set_xticks([])
                axarr[0].set_yticks([])
                axarr[0].set_xlabel(f"\nStato {n}")
                axarr[1].imshow(figue_2, cmap=plt.cm.binary)
                axarr[1].set_xticks([])
                axarr[1].set_yticks([])
                axarr[1].set_xlabel(f"\nStato {n+1}")
                figManager = plt.get_current_fig_manager()
                figManager.full_screen_toggle()
                plt.show()
            state = successor
            n += 1

    def draw_state(self, state):
        """Return a grayscale image (numpy uint8) depicting `state`, using
        100x100 digit tiles loaded from ./images/digits/."""
        blocks = [*state[0:-1]]
        w = state[-1]
        blocks.sort(key=lambda l: l[1], reverse=True)
        h = blocks[0][1]
        image = np.zeros(((h+1)*100, w*100), np.uint8)
        for block in blocks:
            n, i, j = block
            i = h - i  # flip vertically: row 0 is the bottom of the stack
            digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
            digit = cv.resize(digit, (100, 100))
            image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
        # Pad the top so every rendered state has the same height.
        size = (len(state) - 1)*100
        adjust = np.zeros((size, w*100), np.uint8)
        adjust[size - (h+1)*100 : size, :] = image
        return adjust
/cnn.py
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.layers import Conv2D
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras import Sequential

# Script to train and save the neural network that performs digit
# recognition.  The model is trained on the MNIST dataset.

BATCH_SIZE = 64
EPOCHS = 10

# Load the train/test splits of MNIST.
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Add the channel dimension and normalise pixel values into [0, 1].
x_train = np.expand_dims(x_train, -1)
x_train = x_train / 255
x_test = np.expand_dims(x_test, -1)
x_test = x_test / 255

# Model definition: two conv/pool/dropout stages followed by a dense head.
model = Sequential()
model.add(Conv2D(filters=24, kernel_size=(3, 3), activation="relu"))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(filters=36, kernel_size=(3, 3)))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(10, activation="softmax"))

# Run one dummy forward pass so the model's shapes are built before summary().
model.predict(x_train[[0]])
model.summary()

model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy'])

# Train the model.
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(x_test, y_test))

# Compute accuracy and loss on the validation set.
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test loss', test_loss)
print('Test accuracy:', test_acc)

# Plot the accuracy and loss curves over the epochs.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()

model.save("./model/model.h5")
/launch.py
import tkinter as tk
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk
from load_state import prepare_image
from utils import draw_state
from blocks_world import BlocksWorld
from search_algs import *

# Graphical interface for the Blocks World solver: the user selects images
# of the initial and goal states and a search algorithm, then runs the search.


class Window(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.initial_state = None
        self.goal_state = None
        self.create_widgets()
        self.create_images("insert_image.png", "insert_image.png")

    def create_widgets(self):
        """Build the labels, file-selection buttons and algorithm menu."""
        initial_label = tk.Label(self, text = "Seleziona stato iniziale:")
        goal_label = tk.Label(self, text = "Seleziona stato finale:")
        initial_label.grid(row = 0, column = 0, padx = 10, pady = 10)
        goal_label.grid(row = 0, column = 2, padx = 10, pady = 10)
        initial_button = tk.Button(self, text="Seleziona file", command=self.open_initial)
        goal_button = tk.Button(self, text="Seleziona file", command=self.open_goal)
        initial_button.grid(row = 1, column = 0, padx = 10, pady = 10)
        goal_button.grid(row = 1, column = 2, padx = 10, pady = 10)
        alg_label = tk.Label(self, text = "Seleziona algoritmo di ricerca:")
        alg_label.grid(row = 0, column = 1, padx = 10, pady = 10)
        frame = tk.Frame(self)
        frame.grid(row = 1, column = 1, padx = 10, pady = 10)
        self.selected = tk.StringVar(self)
        self.selected.set("BFS")
        # FIX: .pack() returns None, so keeping the original assignments to
        # local names was pointless — call pack() without binding the result.
        tk.OptionMenu(frame, self.selected, "BFS", "DFS", "IDS", "UCS", "A*", "RBFS", command=self.read_algorithm).pack()
        tk.Button(frame, text="Start search", command=self.start_search).pack()

    def create_images(self, initial, goal):
        """Display the preview images for the initial and goal states.

        `initial` and `goal` are paths relative to ./images/.  References to
        the PhotoImage objects are kept on self so Tk does not garbage-collect
        them.
        """
        self.initial_image_path = initial
        self.initial_image = ImageTk.PhotoImage(Image.open("./images/" + initial).resize((300, 300)))
        initial_image_label = tk.Label(self, image=self.initial_image)
        initial_image_label.grid(row = 2, column = 0, padx = 10, pady = 10)
        self.goal_image_path = goal
        self.goal_image = ImageTk.PhotoImage(Image.open("./images/" + goal).resize((300, 300)))
        goal_image_label = tk.Label(self, image=self.goal_image)
        goal_image_label.grid(row = 2, column = 2, padx = 10, pady = 10)

    def open_initial(self):
        """Ask for the initial-state image, parse it, and show the preview."""
        self.initial_file = askopenfilename()
        # BUG FIX: askopenfilename returns '' (or () on some platforms) when
        # the dialog is cancelled; the original only checked for ().
        if not self.initial_file:
            return
        self.initial_state = prepare_image(self.initial_file, False)
        print(self.initial_state)
        draw_state(self.initial_state, "initial")
        self.create_images("/temp/initial.jpg", self.goal_image_path)

    def read_algorithm(self, alg):
        # OptionMenu callback; the selection itself is read via self.selected.
        return alg

    def open_goal(self):
        """Ask for the goal-state image, parse it, and show the preview."""
        self.goal_file = askopenfilename()
        # BUG FIX: same cancel-detection fix as open_initial.
        if not self.goal_file:
            return
        self.goal_state = prepare_image(self.goal_file, False)
        print(self.goal_state)
        draw_state(self.goal_state, "goal")
        self.create_images(self.initial_image_path, "/temp/goal.jpg")

    def start_search(self):
        """Run the selected search algorithm and display the solution."""
        # BUG FIX: the original used `and`, so the search would start with
        # only one state selected and later crash on the missing one
        # (BlocksWorld.goal_test slices self.goal).  Require BOTH states.
        if self.goal_state is None or self.initial_state is None:
            return
        alg = self.selected.get()
        problem = BlocksWorld(self.initial_state, self.goal_state)
        print("Inizio ricerca:")
        if alg == "BFS":
            problem.solution(graph_bfs(problem).solution())
        if alg == "A*":
            problem.solution(a_star(problem, lambda n: problem.misplaced_blocks(n)).solution())
        if alg == "DFS":
            problem.solution(graph_dfs(problem).solution())
        if alg == "IDS":
            problem.solution(ids(problem).solution())
        if alg == "RBFS":
            problem.solution(rbfs(problem, lambda n: problem.misplaced_blocks(n)).solution())
        if alg == "UCS":
            problem.solution(a_star(problem, lambda n: problem.depth(n)).solution())


root = tk.Tk()
root.title("Blocks World")
root.resizable(0, 0)
app = Window(master=root)
app.mainloop()
/load_state.py
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import glob
from tensorflow import keras
from math import ceil

# Image-processing pipeline that turns a photo of a blocks-world scene into
# the tuple-based state representation used by the search problem.

deteced = [np.array([]) for x in range(6)]   # cropped images of the recognized digits (index = digit - 1)
poisitions = [None for x in range(6)]        # bounding box (x_i, y_i, x_f, y_f) of each digit
debug_mode = False

# CNN trained on the MNIST dataset, used to classify the digit crops.
model = keras.models.load_model("./model/model.h5")


def predict(image):
    """Classify the digit contained in `image`.

    The crop is padded to a square, normalized and resized to the 28x28
    input expected by the MNIST model.  Returns digit-1 (blocks 1..6 map
    to indices 0..5) or -1 when the model predicts 0 (no valid block).
    """
    h, w = image.shape
    l = int(max(image.shape) * 1.2)
    n_h = int((l - h) / 2)
    n_w = int((l - w) / 2)
    img = np.zeros((l, l), np.uint8)
    img[n_h:n_h + h, n_w:n_w + w] = image
    img = (img / 255).astype('float64')
    img = cv.resize(img, (28, 28), interpolation=cv.INTER_AREA)
    _in = np.array([img])
    _in = np.expand_dims(_in, -1)
    digit = np.argmax(model.predict(_in))
    if debug_mode:
        print(digit)
        show(img)
    return digit - 1 if digit > 0 else -1


def show(img):
    """Display `img` full screen with matplotlib (debug helper)."""
    figManager = plt.get_current_fig_manager()
    figManager.full_screen_toggle()
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img)
    plt.show()


def preprocess(image):
    """First cleanup pass: median + Gaussian blur, then invert the image."""
    image = cv.medianBlur(image, 3)
    image = cv.GaussianBlur(image, (3, 3), 0)
    return 255 - image


def postprocess(image):
    """Denoise the thresholded image: median blur, opening, then erosion."""
    image = cv.medianBlur(image, 5)
    image = cv.medianBlur(image, 5)
    kernel = np.ones((3, 3), np.uint8)
    image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel)
    kernel = np.ones((3, 3), np.uint8)
    image = cv.erode(image, kernel, iterations=2)
    return image


def get_block_index(image_shape, yx, block_size):
    """Return the meshgrid of indices of the block centred on `yx`,
    clipped to the image borders."""
    y = np.arange(max(0, yx[0] - block_size), min(image_shape[0], yx[0] + block_size))
    x = np.arange(max(0, yx[1] - block_size), min(image_shape[1], yx[1] + block_size))
    return np.meshgrid(y, x)


def adaptive_median_threshold(img_in):
    """Binarize a block: pixels close to (below) the block median become white."""
    med = np.median(img_in)
    threshold = 40
    img_out = np.zeros_like(img_in)
    img_out[img_in - med < threshold] = 255
    return img_out


def block_image_process(image, block_size):
    """Apply the adaptive median threshold block by block over the image."""
    out_image = np.zeros_like(image)
    for row in range(0, image.shape[0], block_size):
        for col in range(0, image.shape[1], block_size):
            idx = (row, col)
            block_idx = get_block_index(image.shape, idx, block_size)
            out_image[block_idx] = adaptive_median_threshold(image[block_idx])
    return out_image


def clean(image):
    """Fill small noise contours with white so they disappear."""
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        if search_noise(contour, approx, image.shape[::-1]):
            cv.drawContours(image, [approx], 0, 255, -1)
    return image


def search_noise(contour, approx, image_size):
    """Return True when the contour is small enough (relative to the whole
    image) to be considered noise."""
    i_h, i_w = image_size
    x, y, w, h = cv.boundingRect(approx)
    image_area = i_w * i_h
    if cv.contourArea(contour) >= image_area / 1000:
        return False
    if w >= i_w / 50 or h >= i_h / 50:
        return False
    return True


def find_digits(image, org_image, org):
    """Run the classifier on every top-level contour of `image` and record
    the digit crops and bounding boxes in the module-level lists."""
    contours, hierarchy = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    i = 0
    for contour in contours:
        approx = cv.approxPolyDP(contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        # hierarchy[0][i][3] == -1 means the contour has no parent (top level).
        if hierarchy[0][i][3] == -1:
            prev = predict(org_image[y:y + h, x:x + w])
            if prev != -1:
                deteced[prev] = org[y:y + h, x:x + w]
                poisitions[prev] = (x, y, x + w, y + h)
        i += 1


def find_box(image):
    """Locate the box containing the blocks and isolate the digit shapes.

    Returns a tuple (result, box) where `result` is a binary image holding
    only the digits and `box` is the bounding box of the playing area.
    """
    o_h, o_w = image.shape[0:2]
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # Bug fix: on OpenCV >= 4.5 findContours returns a tuple, which has no
    # in-place .sort(); sorted() works on every version.
    contours = sorted(contours, reverse=True, key=lambda c: cv.contourArea(c))
    # The largest contour is the full image border; the second is the box.
    contour = contours[1]
    approx = cv.approxPolyDP(
        contour, 0.001 * cv.arcLength(contour, True), True)
    x, y, w, h = cv.boundingRect(approx)
    box = (x, y, x + w, y + h)
    img = image[y:y + h, x:x + w]
    sub = img.copy()
    # Pad with a white frame so contours on the border close properly.
    bg = ~np.zeros((h + 50, w + 50), np.uint8)
    bg[25:25 + h, 25:25 + w] = img
    img = bg
    i = 0
    i_h, i_w = img.shape[0:2]
    tot = np.zeros(shape=(i_h, i_w))
    if debug_mode:
        print(image)
    contours, hierarchy = cv.findContours(img, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        # Fill depth-1 contours white and depth-2 contours black to keep
        # only the digit strokes.
        if hierarchy[0][i][3] == 0:
            cv.drawContours(tot, [approx], 0, 255, -1)
        if hierarchy[0][i][3] == 1:
            cv.drawContours(tot, [approx], 0, 0, -1)
        i += 1
    tot = tot[25:25 + h, 25:25 + w]
    kernel = np.ones((5, 5), np.uint8)
    tot = cv.dilate(tot, kernel, iterations=3)
    tot = tot.astype('uint32')
    sub = sub.astype('uint32')
    res = sub + tot
    res = np.where(res == 0, 255, 0)
    result = np.zeros((o_h, o_w), np.uint8)
    result[y:y + h, x:x + w] = res
    if debug_mode:
        show(result)
    return (result, box)


def get_block_borders(dims, image):
    """Grow the digit bounding box `dims` outward until the white border of
    the enclosing block is reached in all four directions."""
    x_i, y_i, x_f, y_f = dims
    kernel = np.ones((5, 5), np.uint8)
    image = cv.erode(image, kernel, iterations=1)
    y_m = (y_f + y_i) // 2
    x_m = (x_f + x_i) // 2
    t = x_i - 1
    while image[y_m, t] != 255:
        t -= 1
    x_i = t
    t = x_f + 1
    while image[y_m, t] != 255:
        t += 1
    x_f = t
    t = y_i - 1
    while image[t, x_m] != 255:
        t -= 1
    y_i = t
    t = y_f + 1
    while image[t, x_m] != 255:
        t += 1
    y_f = t
    return (x_i, y_i, x_f, y_f)


def process_image_file(filename):
    """Run the whole pipeline on `filename`: threshold, clean, find the box,
    recognize digits and compute block bounding boxes.  Returns the box."""
    global deteced, poisitions, explored, debug_mode
    block_size = 50
    deteced = [np.array([]) for x in range(6)]
    poisitions = [None for x in range(6)]
    explored = []
    image_in = cv.cvtColor(cv.imread(filename), cv.COLOR_BGR2GRAY)
    if debug_mode:
        show(image_in)
    image_in_pre = preprocess(image_in)
    image_out = block_image_process(image_in_pre, block_size)
    image_out = postprocess(image_out)
    image_out = clean(image_out)
    if debug_mode:
        show(image_out)
    digits, box = find_box(image_out)
    find_digits(digits, ~image_out, image_in)
    for i in range(6):
        if deteced[i].size > 0:
            image = deteced[i]
            x, y, w, h = get_block_borders(poisitions[i], ~image_out)
            poisitions[i] = (x, y, w, h)
            cv.rectangle(image_in, (x, y), (w, h), 255, 2)
    if debug_mode:
        show(image_in)
    return box


def check_intersection(values):
    """True when the segment (v1_i, v1_f) contains the midpoint of
    (v2_i, v2_f) — i.e. the two blocks overlap horizontally."""
    v1_i, v1_f, v2_i, v2_f = values
    v2_m = (v2_i + v2_f) // 2
    if v1_i < v2_m and v1_f > v2_m:
        return True
    return False


def create_state(poisitions, box):
    """Build the state tuple ((block, row, col), ..., width) from the block
    bounding boxes and the playing-area box."""
    cols = [[] for x in range(6)]
    mean_points = []
    for i in range(6):
        if poisitions[i] is not None:
            x1_i, y1_i, x1_f, y1_f = poisitions[i]
            # NOTE(review): mean_points is indexed later by block number;
            # this assumes all blocks 1..6 are detected — TODO confirm.
            mean_points.append(((x1_f + x1_i) // 2, ((y1_f + y1_i) // 2)))
            c = [i + 1]
            for j in range(6):
                if poisitions[j] is not None and j != i:
                    x2_i, y2_i, x2_f, y2_f = poisitions[j]
                    if check_intersection((x1_i, x1_f, x2_i, x2_f)):
                        c.append(j + 1)
            c.sort()
            cols[i] = tuple([*c])
        else:
            cols[i] = ()
    # Deduplicate the columns (each member of a column produced the same tuple).
    temp_cols = list(set(tuple(cols)))
    if () in temp_cols:
        temp_cols.remove(())
    cols = []
    for t_col in temp_cols:
        col = list(t_col)
        # Order blocks bottom-to-top (larger y = lower in the image).
        col.sort(reverse=True, key=lambda e: mean_points[e - 1][1])
        cols.append(tuple(col))
    # Order the columns left to right by the x of their bottom block.
    cols.sort(key=lambda e: mean_points[e[0] - 1][0])
    bottoms = [col[0] for col in cols]
    # Estimate empty-column gaps in units of one block width.
    distances = []
    xb_i, _, xb_f, _ = box
    x_i, _, x_f, _ = poisitions[bottoms[0] - 1]
    dist = abs(x_i - xb_i)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    for i in range(len(bottoms) - 1):
        x1_i, _, x1_f, _ = poisitions[bottoms[i] - 1]
        x2_i, _, _, _ = poisitions[bottoms[i + 1] - 1]
        dist = abs(x2_i - x1_f)
        dist = dist / (x1_f - x1_i)
        distances.append(dist)
    x_i, _, x_f, _ = poisitions[bottoms[-1] - 1]
    dist = abs(xb_f - x_f)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    # Round each gap to the nearest integer number of block widths.
    for i in range(len(distances)):
        dist = distances[i]
        if dist - int(dist) >= 0.5:
            distances[i] = int(dist) + 1
        else:
            distances[i] = int(dist)
    n = sum(distances) + len(cols)
    i = distances[0]
    state = []
    pos = 1
    for col in cols:
        j = 0
        for block in col:
            state.append((block, j, i))
            j += 1
        i += distances[pos] + 1
        pos += 1
    state.append(n)
    return tuple(state)


def prepare_image(file_path, debug):
    """Public entry point: process the image at `file_path` and return the
    state tuple.  `debug` enables the step-by-step visualisation."""
    global debug_mode
    debug_mode = True if debug else False
    box = process_image_file(file_path)
    state = create_state(poisitions, box)
    return state
/main.py
from PIL import Image, ImageTk
from load_state import prepare_image
from utils import draw_state
from blocks_world import BlocksWorld
from search_algs import *
import argparse
from inspect import getfullargspec

# Command-line script for running the blocks-world solver.

if __name__ == "__main__":
    # Map the CLI algorithm names onto the search functions.
    search_algs = {
        "astar": a_star,
        "ucs": ucs,
        "rbfs": rbfs,
        "bfs": graph_bfs,
        "dfs": graph_dfs,
        "ids": ids,
    }

    parser = argparse.ArgumentParser(description="Blocks World")
    parser.add_argument("--initial", "-i", type=str, default=None, required=True,
                        help="The image representing the initial state")
    parser.add_argument("--goal", "-g", type=str, default=None, required=True,
                        help="The image representing the goal state")
    parser.add_argument("--algorithm", "-a", type=str, default=None, required=True,
                        help="The search algorithm used")
    parser.add_argument("--debug", "-d", default=False, required=False,
                        action='store_true',
                        help="Shows the steps of the image processing")
    parser.add_argument("--output", "-o", default=False, required=False,
                        action='store_true',
                        help="The solution is printed graphically")
    args = vars(parser.parse_args())

    debug = args["debug"]
    output = args["output"]
    search_alg = args["algorithm"]

    # Parse both state images and echo the extracted states.
    initial_state = prepare_image(args["initial"], debug)
    goal_state = prepare_image(args["goal"], debug)
    print(initial_state)
    print(goal_state)

    # Cost / heuristic functions for the informed algorithms.
    functions = {
        "ucs": lambda n: problem.depth(n),
        "astar": lambda n: problem.misplaced_blocks(n),
        "rbfs": lambda n: problem.misplaced_blocks(n),
    }

    problem = BlocksWorld(initial_state, goal_state)
    chosen = search_algs[search_alg]
    # Informed algorithms take (problem, f); uninformed ones take (problem).
    if len(getfullargspec(chosen).args) == 2:
        result = chosen(problem, functions[search_alg]).solution()
    else:
        result = chosen(problem).solution()
    problem.solution(result, output)
/search_algs.py
from aima3.search import *
from utils import *
from collections import deque
from blocks_world import BlocksWorld
import sys

# Implementations of the search algorithms, instrumented with statistics
# about expanded nodes and frontier size.

node_expanded = 0   # number of nodes expanded during the search
max_node = 0        # maximum number of nodes in the frontier
f_dim = 0           # current frontier size
total_node = 0      # running sum of frontier sizes (for the average)


def init_param():
    """Reset all search statistics before a new run."""
    global node_expanded, total_node, max_node, f_dim
    node_expanded = 0
    max_node = 0
    total_node = 0
    f_dim = 0


def print_param():
    """Print the statistics gathered during the last search."""
    print(f"Nodi espansi: {node_expanded}")
    print(f"Max dimensione della frontiera: {max_node}")
    # Bug fix: guard the average against division by zero when the goal
    # was the initial state and nothing was expanded.
    if node_expanded:
        print(f"Dim media della frontiera: {int(total_node/node_expanded)}")


def show_solution(name_algo, node):
    """Print the result of `name_algo`: the action sequence when `node` is
    a search Node, the raw string for results like 'cutoff', or a
    not-found message otherwise."""
    try:
        print(name_algo + ":", node.solution())
    except AttributeError:
        # Bug fix: the original tested `type(Node)` (the class itself)
        # instead of the `node` argument, so the string branch was dead;
        # the bare `except:` is narrowed to the error .solution() raises.
        if type(node) == str:
            print(name_algo + ":", node)
        else:
            print(name_algo + ":", "No solution found")


# Graph Breadth First Search
def graph_bfs(problem):
    """BFS on the state graph; returns the goal Node or None."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    frontier = deque([Node(problem.initial)])
    f_dim += 1
    explored = set()
    while frontier:
        node_expanded += 1
        total_node += f_dim
        node = frontier.popleft()
        f_dim -= 1
        explored.add(node.state)
        if problem.goal_test(node.state):
            print_param()
            return node
        for child_node in node.expand(problem):
            if child_node.state not in explored and child_node not in frontier:
                f_dim += 1
                max_node = f_dim if f_dim > max_node else max_node
                frontier.append(child_node)


# Graph Depth First Search
def graph_dfs(problem):
    """DFS on the state graph; returns the goal Node or None."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    frontier = deque([Node(problem.initial)])
    f_dim += 1
    explored = set()
    while frontier:
        total_node += f_dim
        node = frontier.pop()
        node_expanded += 1
        f_dim -= 1
        if problem.goal_test(node.state):
            print_param()
            return node
        explored.add(node.state)
        for child_node in node.expand(problem):
            if child_node.state not in explored and child_node not in frontier:
                f_dim += 1
                max_node = f_dim if f_dim > max_node else max_node
                frontier.append(child_node)


# Uniform Cost Search (best-first on f)
def ucs(problem, f):
    """Best-first search ordered by f(node); also the engine behind A*."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    if problem.goal_test(problem.initial):
        return Node(problem.initial)
    f = memoize(f, 'f')
    node_expanded += 1
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    f_dim += 1
    explored = set()
    while frontier:
        total_node += f_dim
        node_expanded += 1
        node = frontier.pop()
        f_dim -= 1
        if problem.goal_test(node.state):
            print_param()
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                f_dim += 1
                frontier.append(child)
                max_node = f_dim if f_dim > max_node else max_node
            elif child in frontier:
                # Replace the queued node when a cheaper path is found.
                next_node = frontier.get_item(child)
                if f(child) < f(next_node):
                    del frontier[next_node]
                    frontier.append(child)


# Depth Limited Search
def dls(problem, limit):
    """Recursive DFS bounded at `limit`; returns a Node, 'cutoff' or None."""
    def recursive_dls(problem, node, limit):
        global node_expanded, total_node, max_node, f_dim
        node_expanded += 1
        total_node += f_dim
        if problem.goal_test(node.state):
            return node
        elif limit == 0:
            return 'cutoff'
        cutoff_occurred = False
        for child_node in node.expand(problem):
            f_dim += 1
            max_node = f_dim if f_dim > max_node else max_node
            result = recursive_dls(problem, child_node, limit - 1)
            f_dim -= 1
            if result == 'cutoff':
                cutoff_occurred = True
            elif result is not None:
                return result
        return 'cutoff' if cutoff_occurred else None

    return recursive_dls(problem, Node(problem.initial), limit)


# Iterative Deepening Search
def ids(problem):
    """Run dls with increasing depth limits until a result is found."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    prevexp = 0
    for depth in range(sys.maxsize):
        f_dim += 1
        result = dls(problem, depth)
        # Nodes expanded at this depth only.
        print(node_expanded - prevexp)
        prevexp = node_expanded
        f_dim = 0
        if result != 'cutoff':
            print_param()
            return result
    return None


# A*
def a_star(problem: BlocksWorld, h=None):
    """A*: best-first on g(n) + h(n), delegating to ucs."""
    global node_expanded
    h = memoize(h or problem.h)
    return ucs(problem, lambda n: problem.depth(n) + h(n))


# Recursive Best First Search
def rbfs(problem, h):
    """RBFS with linear memory; returns the goal Node or None."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    h = memoize(h or problem.h, 'h')
    g = memoize(lambda n: problem.depth(n), 'g')
    f = memoize(lambda n: g(n) + h(n), 'f')

    def rbfs_search(problem, node, f_limit=np.inf):
        global node_expanded, total_node, max_node, f_dim
        node_expanded += 1
        if problem.goal_test(node.state):
            print_param()
            return node, 0
        successors = [*node.expand(problem)]
        f_dim += len(successors)
        total_node += f_dim
        max_node = f_dim if f_dim > max_node else max_node
        if len(successors) == 0:
            return None, np.inf
        for child in successors:
            child.f = max(f(child), node.f)
        while True:
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > f_limit:
                f_dim -= len(successors)
                return None, best.f
            alt = successors[1].f if len(successors) > 1 else np.inf
            # Important: best.f must be overwritten with the backed-up value.
            result, best.f = rbfs_search(problem, best, min(f_limit, alt))
            if result is not None:
                f_dim -= len(successors)
                return result, best.f

    node = Node(problem.initial)
    f(node)
    f_dim += 1
    return rbfs_search(problem, node)[0]
/utils.py
import heapq
import functools
import numpy as np


class PriorityQueue:
    """A Queue in which the minimum (or maximum) element (as determined by
    f and order) is returned first.  If order is 'min', the item with
    minimum f(x) is returned first; if order is 'max', the item with
    maximum f(x).  Also supports dict-like lookup."""

    def __init__(self, order='min', f=lambda x: x):
        self.heap = []
        if order == 'min':
            self.f = f
        elif order == 'max':
            # Negate the key so the item with max f(x) is popped first.
            self.f = lambda x: -f(x)
        else:
            raise ValueError("Order must be either 'min' or 'max'.")

    def append(self, item):
        """Insert item at its correct position."""
        heapq.heappush(self.heap, (self.f(item), item))

    def extend(self, items):
        """Insert each item in items at its correct position."""
        for item in items:
            self.append(item)

    def pop(self):
        """Pop and return the item (with min or max f(x) value)
        depending on the order."""
        if self.heap:
            return heapq.heappop(self.heap)[1]
        raise Exception('Trying to pop from empty PriorityQueue.')

    def __len__(self):
        """Return current capacity of PriorityQueue."""
        return len(self.heap)

    def __contains__(self, key):
        """Return True if the key is in PriorityQueue."""
        # Generator instead of a materialized list: short-circuits on hit.
        return any(item == key for _, item in self.heap)

    def __getitem__(self, key):
        """Returns the first value associated with key in PriorityQueue.
        Raises KeyError if key is not present."""
        for value, item in self.heap:
            if item == key:
                return value
        raise KeyError(str(key) + " is not in the priority queue")

    def __delitem__(self, key):
        """Delete the first occurrence of key."""
        try:
            del self.heap[[item == key for _, item in self.heap].index(True)]
        except ValueError:
            raise KeyError(str(key) + " is not in the priority queue")
        heapq.heapify(self.heap)

    def get_item(self, key):
        """Returns the first node associated with key in PriorityQueue.
        Raises KeyError if key is not present."""
        for _, item in self.heap:
            if item == key:
                return item
        raise KeyError(str(key) + " is not in the priority queue")


def is_in(elt, seq):
    """Similar to (elt in seq), but compares with 'is', not '=='."""
    return any(x is elt for x in seq)


def memoize(fn, slot=None, maxsize=32):
    """Memoize fn: make it remember the computed value for any argument
    list.  If slot is specified, store the result in that slot of the
    first argument.  If slot is false, use lru_cache."""
    if slot:
        def memoized_fn(obj, *args):
            if hasattr(obj, slot):
                return getattr(obj, slot)
            val = fn(obj, *args)
            setattr(obj, slot, val)
            return val
    else:
        @functools.lru_cache(maxsize=maxsize)
        def memoized_fn(*args):
            return fn(*args)
    return memoized_fn


def draw_state(state, file_path):
    """Render `state` as a picture of stacked digit blocks and write it
    to ./images/temp/<file_path>.jpg."""
    # Improvement: heavy imports are deferred into the only function that
    # uses them, so the queue/memoize utilities above can be imported
    # without OpenCV or matplotlib installed.
    import cv2 as cv
    import matplotlib.pyplot as plt  # noqa: F401  kept for parity with the original module
    blocks = [*state[0:-1]]
    w = state[-1]
    blocks.sort(key=lambda l: l[1], reverse=True)
    h = blocks[0][1]
    image = np.zeros(((h + 1) * 100, w * 100), np.uint8)
    for block in blocks:
        n, i, j = block
        i = h - i  # flip: row 0 is the bottom of the stack
        digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
        digit = cv.resize(digit, (100, 100))
        image[i * 100:i * 100 + 100, j * 100:j * 100 + 100] = ~digit
    size = (len(state) - 1) * 100
    padded = np.zeros((size, w * 100), np.uint8)
    padded[size - (h + 1) * 100:size, :] = image
    h = len(state) - 1
    # White 10px frame around the scene (the duplicate bottom-border
    # assignment of the original was removed; behavior is identical).
    bg = np.zeros((h * 100 + 40, w * 100 + 40), np.uint8)
    bg[20:h * 100 + 20, 20:w * 100 + 20] = padded
    bg[0:10, :] = 255
    bg[h * 100 + 30:h * 100 + 40, :] = 255
    bg[:, 0:10] = 255
    bg[:, w * 100 + 30:w * 100 + 40] = 255
    w, h = (w * 100 + 40, h * 100 + 40)
    l = max(w, h)
    adjust = np.zeros((l, l), np.uint8)
    d_w = (l - w) // 2
    d_h = (l - h) // 2
    adjust[d_h:d_h + h, d_w:d_w + w] = bg
    cv.imwrite("./images/temp/" + str(file_path) + ".jpg", ~adjust)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
viaacode/status
refs/heads/master
{"/src/viaastatus/server/wsgi.py": ["/src/viaastatus/server/response.py", "/src/viaastatus/decorators.py"]}
└── ├── locustfile.py ├── setup.py └── src └── viaastatus ├── decorators.py ├── prtg │ └── api.py └── server ├── cli.py ├── response.py └── wsgi.py
/locustfile.py
from locust import HttpLocust, TaskSet, task


class WebsiteTasks(TaskSet):
    """Load-test tasks that exercise the public status endpoints."""

    @task
    def index(self):
        """Fetch the landing page."""
        self.client.get("/")

    @task
    def status(self):
        """Fetch the aggregate status page."""
        self.client.get("/status")

    @task
    def hetarchief(self):
        """Fetch the hetarchief status badge."""
        self.client.get("/status/hetarchief.png")

    @task
    def ftp(self):
        """Fetch the ftp status badge."""
        self.client.get("/status/ftp.png")


class WebsiteUser(HttpLocust):
    """Simulated user running WebsiteTasks, pausing 5-15s between tasks."""
    task_set = WebsiteTasks
    min_wait = 5000
    max_wait = 15000
/setup.py
from setuptools import setup, find_packages

# Read the long description and the pinned requirements from disk.
with open('README.md') as f:
    long_description = f.read()

with open('requirements.txt') as f:
    requirements = list(map(str.rstrip, f.readlines()))

setup(
    name='viaastatus',
    url='https://github.com/viaacode/status/',
    version='0.0.3',
    author='VIAA',
    author_email='support@viaa.be',
    # Bug fix: this keyword was misspelled `descriptiona`, so setuptools
    # silently dropped the short description from the package metadata.
    description='Status services',
    long_description=long_description,
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
    ],
    python_requires='>=3.4',
    packages=find_packages("src"),
    package_dir={"": "src"},
    package_data={'viaastatus': ['server/static/*']},
    include_package_data=True,
    install_requires=requirements,
    extras_require={
        'test': [
            "pytest>=4.2.0"
        ],
        'loadtest': [
            "locustio>=0.11.0"
        ],
        'gunicorn': [
            'gunicorn>=19.9.0'
        ],
        'uwsgi': [
            'uWSGI>=2.0.18'
        ],
        'waitress': [
            'waitress>=1.2.1'
        ],
    },
    platforms='any'
)
/src/viaastatus/decorators.py
from functools import wraps, partial
from flask import request, render_template


def cached(key='view/%s', cache=None, **extra_cache_kwargs):
    """Decorator factory caching a view's return value, keyed on the
    request path formatted into `key`."""
    def decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            cache_key = key % request.path
            hit = cache.get(cache_key)
            if hit is not None:
                return hit
            result = f(*args, **kwargs)
            cache.set(cache_key, result, **extra_cache_kwargs)
            return result
        return decorated
    return decorator


def cacher(cache, **kwargs):
    """Bind `cached` to a specific cache backend."""
    return partial(cached, cache=cache, **kwargs)


def templated(template=None):
    """Render the view's dict return value with `template` (defaulting to
    a name derived from the endpoint); pass non-dict returns through."""
    def decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            name = template
            if name is None:
                # e.g. endpoint 'a.b' -> template 'a/b.html'
                name = request.endpoint.replace('.', '/') + '.html'
            ctx = f(*args, **kwargs)
            if ctx is None:
                ctx = {}
            elif not isinstance(ctx, dict):
                return ctx
            return render_template(name, **ctx)
        return decorated
    return decorator
/src/viaastatus/prtg/api.py
import requests
import json
import functools
import logging

# Minimal client for the PRTG HTTP API.

logger = logging.getLogger(__name__)

entrypoint = '/api'


class PRTGError(Exception):
    """Base error for PRTG API failures."""
    pass


class PRTGAuthenticationError(PRTGError):
    """Raised when PRTG rejects the supplied credentials."""
    pass


class ResponseTypes:
    """Parsers for the supported PRTG response formats."""

    @staticmethod
    def json(data):
        return json.loads(data)


class API:
    """PRTG API client; unknown attributes become API method calls via
    __getattr__ (e.g. api.table(...) hits /api/table.json)."""

    def __init__(self, host, username, passhash):
        self._requests = requests
        self._host = host
        self._authparams = {
            "username": username,
            "passhash": passhash
        }

    @property
    def requests(self):
        # The HTTP layer is swappable (e.g. a Session, or a mock in tests).
        return self._requests

    @requests.setter
    def requests(self, val):
        self._requests = val

    def _call(self, method, response_type=None, **params):
        """Call `method` on the PRTG API and parse the response.

        Raises ValueError for an unknown response_type and PRTGError for
        HTTP or transport failures.
        """
        if response_type is None:
            response_type = 'json'
        if not hasattr(ResponseTypes, response_type):
            raise ValueError("Unknown response type", response_type)
        url = '%s%s/%s.%s' % (self._host, entrypoint, method, response_type)
        try:
            params = dict(params, **self._authparams)
            response = self._requests.get(url, params=params)
            if response.status_code != 200:
                logger.warning("Wrong exit code %d for %s", response.status_code, url)
                raise PRTGError("Invalid HTTP code response", response.status_code)
            return getattr(ResponseTypes, response_type)(response.content.decode('utf-8'))
        except PRTGError:
            # Bug fix: don't re-wrap the PRTGError raised just above —
            # the blanket handler below used to nest it inside another
            # PRTGError.
            raise
        except Exception as e:
            raise PRTGError(e) from e

    def __getattr__(self, item):
        return functools.partial(self._call, item)

    @staticmethod
    def from_credentials(host, username, password, _requests=None):
        """Build an API client by exchanging username/password for a
        passhash.  Raises PRTGAuthenticationError on failure."""
        url = '%s%s/getpasshash.htm' % (host, entrypoint)
        params = {
            "username": username,
            "password": password,
        }
        if _requests is None:
            _requests = requests.Session()
        response = _requests.get(url, params=params)
        if response.status_code != 200:
            raise PRTGAuthenticationError("Couldn't authenticate",
                                          response.status_code,
                                          response.content)
        result = API(host, username, response.content)
        result.requests = _requests
        return result
/src/viaastatus/server/cli.py
from argparse import ArgumentParser
from viaastatus.server import wsgi
import logging


def argparser():
    """
    Get the help and arguments specific to this module
    """
    parser = ArgumentParser(prog='status',
                            description='A service that supplies status information about our platforms')
    parser.add_argument('--debug', action='store_true', help='run in debug mode')
    parser.add_argument('--host', help='hostname or ip to serve api')
    parser.add_argument('--port', type=int, default=8080, help='port used by the server')
    parser.add_argument('--log-level', type=str.lower, default='warning', dest='log_level',
                        choices=list(map(str.lower, logging._nameToLevel.keys())),
                        help='set the logging output level')
    return parser


def main():
    """Parse CLI arguments, configure logging and run the Flask app."""
    args = argparser().parse_args()
    level = args.log_level.upper()
    logging.basicConfig(level=level)
    logging.getLogger().setLevel(level)
    del args.log_level
    # Bug fix: argparse.Namespace is not a mapping, so `run(**args)`
    # raised a TypeError; unpack the namespace's attribute dict instead.
    wsgi.create_app().run(**vars(args))


if __name__ == '__main__':
    main()
/src/viaastatus/server/response.py
import os
from flask import jsonify, Response
import flask


class FileResponse(Response):
    """Response whose body is the contents of a file on disk.

    Relative filenames are resolved against the Flask app root path.
    """

    default_mimetype = 'application/octet-stream'

    def __init__(self, filename, **kwargs):
        if not os.path.isabs(filename):
            filename = os.path.join(flask.current_app.root_path, filename)
        with open(filename, 'rb') as fh:
            body = fh.read()
        super().__init__(body, **kwargs)


class StatusResponse(FileResponse):
    """PNG badge for a tri-state status: True -> ok, False -> nok,
    anything else -> unk."""

    default_mimetype = 'image/png'

    def __init__(self, status, **kwargs):
        # Identity checks on purpose: e.g. 1 must map to 'unk', not 'ok'.
        if status is True:
            label = 'ok'
        elif status is False:
            label = 'nok'
        else:
            label = 'unk'
        super().__init__('static/status-%s.png' % (label,), **kwargs)


class Responses:
    """Factory methods for the supported output formats."""

    @staticmethod
    def json(obj):
        return jsonify(obj)

    @staticmethod
    def html(obj):
        return Response('<html><body>%s</body></html>' % (obj,),
                        content_type='text/html')

    @staticmethod
    def txt(obj):
        if type(obj) is not str:
            obj = '\n'.join(obj)
        return Response(obj, content_type='text/plain')

    @staticmethod
    def status(status_):
        return StatusResponse(status_)
/src/viaastatus/server/wsgi.py
from flask import Flask, abort, Response, send_file, request, flash, session, render_template
from flask import url_for, redirect
from viaastatus.prtg import api
from viaastatus.decorators import cacher, templated
from os import environ
import logging
from configparser import ConfigParser
import re
import hmac
from hashlib import sha256
from functools import wraps, partial
import argparse
import itertools
import werkzeug.contrib.cache as workzeug_cache
from viaastatus.server.response import Responses
import requests

# Verbosity comes from the environment so the same image can run in any env.
log_level = logging._nameToLevel[environ.get('VERBOSITY', 'debug').upper()]
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
logging.getLogger().setLevel(log_level)


def normalize(txt):
    """Normalize a sensor name to a URL-friendly slug.

    Lowercases, dashes spaces, strips (...) and [...] groups, drops a
    trailing numeric suffix and collapses repeated dashes.
    """
    txt = txt.replace(' ', '-').lower()
    txt = re.sub('-{2,}', '-', txt)
    txt = re.sub(r'\([^)]*\)', '', txt)
    txt = re.sub(r'\[[^)]*\]', '', txt)
    txt = re.sub('-[0-9]*$', '', txt)
    txt = re.sub('-{2,}', '-', txt)
    return txt


def create_app():
    """Build and configure the Flask application.

    Reads CONFIG_FILE (default ``config.ini``), wires up caching, the PRTG
    API client, token/login protection and all routes.

    :return: configured :class:`flask.Flask` instance
    """
    app = Flask(__name__)
    config = ConfigParser()
    config.read(environ.get('CONFIG_FILE', 'config.ini'))
    app_config = config['app']
    cache_timeout = int(app_config.get('cache_timeout', 30))
    if cache_timeout > 0:
        cache_ = workzeug_cache.SimpleCache(default_timeout=cache_timeout)
    else:
        # timeout <= 0 disables caching entirely
        cache_ = workzeug_cache.NullCache()
    cache = cacher(cache_)()
    cache_other = cacher(cache_, timeout=cache_timeout, key='other/%s')()
    app.secret_key = app_config['secret_key']
    salt = app_config['salt']

    @cache_other
    def get_sensors(prtg_) -> dict:
        """Map normalized sensor names to PRTG object ids.

        Only http/ftp/httptransaction sensors are considered; generic
        'HTTP*' sensor names are prefixed with their device name, and
        devices that are plain IP addresses are skipped.
        """
        sensors = {}
        cols = 'objid,name,device'
        ippattern = re.compile(r'[\d\.]+')
        for sensor in prtg_.table(content='sensors',
                                  filter_type=['http', 'ftp', 'httptransaction'],
                                  filter_active=-1,
                                  columns=cols)['sensors']:
            parentname = sensor['device']
            sensor_name = sensor['name']
            if sensor_name.startswith('HTTP'):
                # filter out IPs
                if ippattern.fullmatch(parentname):
                    continue
                sensor_name = parentname + ' - ' + sensor_name
            sensor_name = normalize(sensor_name)
            if sensor_name in sensors:
                # First sensor with a given normalized name wins.
                logger.warning("Sensor '%s' is conflicting (current id: %d, requested to set to: %d), ignored",
                               sensor_name, sensors[sensor_name], sensor['objid'])
                continue
            sensors[sensor_name] = int(sensor['objid'])
        return sensors

    def _token(*args, **kwargs):
        """Calculate the access token for a set of route parameters."""
        params = str([args, kwargs])
        return hmac.new(salt.encode('utf-8'), params.encode('utf-8'), sha256).hexdigest()[2:10]

    def secured_by_login(func):
        """Decorator: require an authenticated session (404 when login is unconfigured)."""
        @wraps(func)
        def _(*args, **kwargs):
            if not login_settings:
                logger.info('Login requested but refused since no login data in config')
                abort(404)
            if not session.get('authenticated'):
                return _login()
            return func(*args, **kwargs)
        return _

    def secured_by_token(func):
        """Decorator: require a valid ?token= unless the session is authenticated
        or the wrapped view is called with ignore_token=True (alias routes)."""
        @wraps(func)
        def _(*args, **kwargs):
            check_token = 'authenticated' not in session
            if 'ignore_token' in kwargs:
                check_token = not kwargs['ignore_token']
                del kwargs['ignore_token']
            if check_token:
                token = request.args.get('token')
                expected_token = _token(*args, **kwargs)
                if token != expected_token:
                    logger.warning("Wrong token '%s' for %s, expected: '%s'",
                                   token, func.__name__, expected_token)
                    abort(401)
            return func(*args, **kwargs)
        # Expose the token function so /urls can pre-compute tokenized links.
        _._secured_by_token = _token
        return _

    prtg_conf = config['prtg']
    _requests = requests.Session()
    if 'certificate' in prtg_conf:
        _requests.cert = (prtg_conf['certificate'], prtg_conf['private_key'])
    prtg = api.API.from_credentials(prtg_conf['host'],
                                    prtg_conf['username'],
                                    prtg_conf['password'],
                                    _requests)
    login_settings = None
    if config.has_section('login'):
        login_settings = dict(config['login'])

    class Choices:
        """Enumerates valid values for each route argument (used by /urls)."""

        @staticmethod
        def sensor():
            return list(get_sensors(prtg).keys())

        @staticmethod
        def type_():
            return {'json', 'png', 'txt', 'html'}

        @staticmethod
        def ttype():
            return {'json', 'txt', 'html'}

    @app.route('/login', methods=['GET'])
    @templated('login.html')
    def _login():
        pass

    @app.route('/urls', methods=['GET'])
    @secured_by_login
    @templated('urls.html')
    def _urls():
        """List every public leaf route with pre-tokenized example URLs."""
        context = {}
        # NOTE(review): reads the module-level `application` (defined after
        # create_app() returns) rather than `app` -- works at request time,
        # but only for the module-level instance; confirm intent.
        rules = [rule for rule in application.url_map.iter_rules()
                 if rule.is_leaf and rule.endpoint != 'static' and not rule.endpoint.startswith('_')]
        method_types = {}
        for i in range(len(rules)):
            rule = rules[i]
            rules[i] = rules[i].__dict__
            kargs = [argname for argname in rule.arguments if hasattr(Choices, argname)]
            vargs = [getattr(Choices, argname)() for argname in kargs]
            methods = []
            # Cartesian product over all valid argument values.
            for params in itertools.product(*vargs):
                params = dict(zip(kargs, params))
                url = url_for(rule.endpoint, **params)
                view_func = app.view_functions[rule.endpoint]
                if hasattr(view_func, '_secured_by_token'):
                    url += '?token=%s' % (view_func._secured_by_token(**params))
                methods.append({
                    "name": rule.endpoint,
                    "params": params,
                    "url": url,
                })
            method_types[rule.endpoint] = methods
        context['method_types'] = method_types
        return context

    @app.route('/login', methods=['POST'])
    def _do_login():
        """Validate submitted credentials and mark the session authenticated."""
        if not login_settings:
            logger.info('Login requested but refused since no login data in config')
            abort(404)
        if request.form['password'] != login_settings['password'] or \
                request.form['username'] != login_settings['username']:
            flash('Invalid credentials!')
        else:
            session['authenticated'] = True
        return redirect('/urls')

    @app.route('/', methods=['GET'])
    @cache
    @templated('oldstatus.html')
    def index_():
        pass

    @app.route('/sensors.<ttype>')
    @cache
    @secured_by_token
    def sensors_(ttype):
        """Return the list of known sensor names as json/txt/html."""
        if ttype not in Choices.ttype():
            abort(404)
        return getattr(Responses, ttype)(Choices.sensor())

    @app.route('/status/<sensor>.<type_>', methods=['GET'])
    @cache
    @secured_by_token
    def status_(sensor, type_):
        """
        :param str sensor: Name of the sensor
        :param str type_: Response type
        :return: status of the sensor rendered as png/json/txt/html
        """
        if type_ not in Choices.type_():
            abort(404)
        try:
            sensors = get_sensors(prtg)
            if sensor not in sensors:
                abort(404)
            sensor_id = sensors[sensor]
            status = prtg.getsensordetails(id=sensor_id)['sensordata']
        except Exception as e:
            # NOTE(review): abort(404) above also lands here, so an unknown
            # sensor yields the 'unknown' icon for png instead of a 404 --
            # presumably intended as a graceful fallback; confirm.
            if type_ == 'png':
                return Responses.status(None)
            raise e
        if type_ == 'png':
            # PRTG status ids: 3/4 = up-ish, 7-12 = paused/unknown-ish.
            if int(status['statusid']) in [3, 4]:
                status = True
            elif int(status['statusid']) in [7, 8, 9, 10, 12]:
                status = None
            else:
                status = False
            return Responses.status(status)
        if type_ == 'txt':
            status = status['statustext']
        elif type_ == 'html':
            status_msg = '''
            <dl>
                <dt>%s</dt>
                <dd><a href="%s/sensor.htm?id=%d">%s</a></dd>
            </dl>
            '''
            # BUG FIX: the first two arguments were swapped, which put the
            # PRTG host into the <dt> label and built a broken link of the
            # form "sensorname/sensor.htm?id=N".
            status = status_msg % (sensor, prtg._host, sensor_id, status['statustext'])
        return getattr(Responses, type_)(status)

    @app.route('/status', methods=['GET'])
    @templated('statuspage.html')
    def status_page():
        """Overview page listing the configured alias -> target mappings."""
        if not config.has_section('aliases'):
            abort(404)
        aliases = {url: fwd.split(':')[1] for url, fwd in config['aliases'].items()}
        return dict(aliases=aliases)

    # add aliases: each config entry "url = endpoint:arg1:arg2" becomes an
    # extra route that calls the endpoint's view with tokens disabled.
    if config.has_section('aliases'):
        for url, target in config['aliases'].items():
            target = target.split(':')
            name = target.pop(0)
            func = app.view_functions[name]
            kwargs = dict(ignore_token=True)
            func = partial(func, *target, **kwargs)
            func.__name__ = url
            app.route(url)(func)

    return app


application = create_app()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', help='run in debug mode')
    parser.add_argument('--host', help='hostname or ip to serve app')
    parser.add_argument('--port', type=int, default=1111, help='port used by the server')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    application.run(host=args.host, port=args.port, debug=args.debug)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
digital-sustainability/swiss-procurement-classifier
refs/heads/master
{"/runOldIterations.py": ["/train.py", "/collection.py"], "/runIterations.py": ["/learn.py", "/collection.py"]}
└── ├── collection.py ├── db.py ├── helpers.py ├── learn.py ├── runIterations.py ├── runOldIterations.py └── train.py
/collection.py
import json
import pandas as pd
import warnings


class Collection():
    """Accumulates per-bidder training iterations and exports them as DataFrames/JSON."""

    # Algorithms every stored iteration is expected to contain results for.
    algorithms = ['gradient_boost', 'decision_tree', 'random_forest']

    def __init__(self):
        # One dict per iteration, keyed by algorithm name plus metadata keys.
        self.list = []

    def append(self, item):
        """Add one iteration result dict to the collection."""
        self.list.append(item)

    def __iter__(self):
        return iter(self.list)

    def get_all_as_df(self, algorithm):
        """Return one metadata row per iteration for *algorithm*, indexed by bidder.

        :param str algorithm: one of :attr:`algorithms`
        :return: :class:`pandas.DataFrame`, or None (with a warning) for an
            unknown algorithm key
        """
        try:
            rows = [iteration[algorithm]['metadata'] for iteration in self.list]
            return pd.DataFrame(rows, index=[iteration['anbieter'] for iteration in self.list])
        except KeyError:
            # Narrowed from a bare `except:` so genuine bugs (AttributeError,
            # pandas errors, KeyboardInterrupt, ...) are no longer swallowed.
            warnings.warn('Select an algorithm: "random_forest", "gradient_boost" or "decision_tree"')

    def df_row_per_algorithm(self):
        """Flatten the collection into one DataFrame row per (iteration, algorithm).

        NOTE: extends the stored metadata dicts in place with the derived
        std-deviation metrics before collecting them.
        """
        rows = []
        for iteration in self.list:
            for algorithm in self.algorithms:
                output = iteration[algorithm]['metadata']
                evaluation_dataframe = pd.DataFrame.from_dict(iteration[algorithm]['data'])
                # Derive the metrics missing from the stored metadata.
                output['acc_std'] = evaluation_dataframe['accuracy'].std()
                evaluation_dataframe['MCC'] = evaluation_dataframe['MCC'] * 100  # scale to %
                output['mcc_std'] = evaluation_dataframe['MCC'].std()
                output['fn_std'] = evaluation_dataframe['fn_rate'].std()
                output['anbieter'] = iteration['anbieter']
                output['label'] = iteration['label']
                output['algorithm'] = algorithm
                output['attributes'] = ",".join(iteration['attributes'])
                rows.append(output)
        return pd.DataFrame(rows)

    def to_json(self, **kwargs):
        """Serialize the raw iteration list to a JSON string."""
        return json.dumps(self.list, **kwargs)

    def to_file(self, filename):
        """Write the iteration list to *filename* as pretty-printed JSON."""
        with open(filename, 'w') as fp:
            json.dump(self.list, fp, indent=4, sort_keys=True)

    def import_file(self, filename, force=False):
        """Load iterations from *filename*; refuses to clobber loaded data unless force=True."""
        if len(self.list) and not force:
            warnings.warn("Loaded Collection, pls add force=True")
        else:
            with open(filename, 'r') as fp:
                self.list = json.load(fp)
/db.py
import configparser
import sqlalchemy

# git update-index --skip-worktree config.ini

config = configparser.ConfigParser()
config.read("config.ini")

# Assemble the SQLAlchemy URL from the [database] section.
# NOTE(review): credentials are not URL-escaped; passwords containing
# '@', ':' or '/' would break the URL -- confirm against deployment.
_db = config['database']
connection_string = 'mysql+{}://{}:{}@{}/{}'.format(
    _db['connector'], _db['user'], _db['password'], _db['host'], _db['database'])

if __name__ == "__main__":
    # Invoked directly: dump the configuration instead of connecting.
    for item, element in _db.items():
        print('%s: %s' % (item, element))
    print(connection_string)
else:
    # Imported as a module: expose a live engine and connection.
    engine = sqlalchemy.create_engine(connection_string)
    connection = engine.connect()
/helpers.py
from db import connection, engine
import math
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc

# =====================
# SQL SELECT STATEMENTS
# =====================
# NOTE(review): all helpers below interpolate caller-supplied strings
# straight into SQL via str.format -- acceptable for trusted notebook use,
# but open to SQL injection; consider pd.read_sql(..., params=...) if any
# input can come from untrusted sources.


# Fetch arbitrary columns from the fully-joined Simap schema.
# @param select: SELECT column list formatted as string
# @return a pandas DataFrame from the full Simap database depending on the SQL SELECT query
def getFromSimap(select):
    query = """SELECT {} from (((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer) INNER JOIN cpv ON cpv_dokument.cpv_nummer = cpv.cpv_nummer; """.format(select)
    return pd.read_sql(query, connection);


# Count awards (Zuschlaege) per CPV code for one bidder.
# @param bidder: anbieter.institution name formatted as string
# @return a pandas DataFrame showing the most important CPV codes per bidder (awards per CPV code)
def getCpvCount(bidder):
    query = """SELECT cpv.cpv_nummer, cpv.cpv_deutsch, COUNT(cpv_dokument.cpv_nummer) FROM cpv, cpv_dokument, zuschlag, beruecksichtigteanbieter_zuschlag, anbieter WHERE cpv.cpv_nummer = cpv_dokument.cpv_nummer AND cpv_dokument.meldungsnummer = zuschlag.meldungsnummer AND zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer AND beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id AND anbieter.institution = "{}" GROUP BY cpv_nummer ORDER BY COUNT(cpv_dokument.cpv_nummer) DESC; """.format(bidder)
    return pd.read_sql(query, connection);


# Summarize how diverse a bidder's awarded CPV codes are, plus award volume
# and the covered publication date range.
# @param bidder: anbieter.institution formatted as string of which you want to see the CPV code diversity
# @return a pandas DataFrame that contains the diversity of CPV codes per bidder
def getCpvDiversity(bidder):
    query = """SELECT anbieter.institution, COUNT(beruecksichtigteanbieter_zuschlag.anbieter_id) AS "Anzahl Zuschläge", COUNT(DISTINCT cpv_dokument.cpv_nummer) AS "Anzahl einzigartige CPV-Codes", SUM(IF(beruecksichtigteanbieter_zuschlag.preis_summieren = 1,beruecksichtigteanbieter_zuschlag.preis,0)) AS "Ungefähres Zuschlagsvolumen", MIN(zuschlag.datum_publikation) AS "Von", MAX(zuschlag.datum_publikation) AS "Bis" FROM cpv, cpv_dokument, zuschlag, beruecksichtigteanbieter_zuschlag, anbieter WHERE cpv.cpv_nummer = cpv_dokument.cpv_nummer AND cpv_dokument.meldungsnummer = zuschlag.meldungsnummer AND zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer AND beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id AND anbieter.institution="{}" GROUP BY anbieter.institution ORDER BY `Anzahl einzigartige CPV-Codes` DESC """.format(bidder)
    return pd.read_sql(query, connection);


# @param select_anbieter: SQL SELECT for the bidder side.
# Backup: example SELECT for the bidder (anbieter) side:
'''
select_an = (
    "anbieter.anbieter_id, "
    "anbieter.anbieter_plz, "
    "anbieter.institution as anbieter_insitution, "
    "cpv_dokument.cpv_nummer as anbieter_cpv, "
    "ausschreibung.meldungsnummer"
)
'''
# @param select_aus: SQL SELECT for the open tenders. Backup:
'''
select_aus = (
    "anbieter.anbieter_id, "
    "auftraggeber.institution as beschaffungsstelle_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv, "
    "ausschreibung.meldungsnummer"
)
'''


# Join the bidder-side and tender-side selections for one bidder (positive
# case) or everyone else (negative case).
# NOTE(review): the join key "ausschreibung.meldungsnummer2" presumably comes
# from an alias inside select_ausschreibung -- verify against the callers.
# @param bidder: the bidder formatted as string you do or do not want the corresponding responses from
# @param response: True if you want all the tenders of the bidder or False if you do not want any (the negative response)
# @return a dataframe containing negative or positive bidding cases of a chosen bidder
def getResponses(select_anbieter, select_ausschreibung, bidder, response):
    resp = '=';
    if (not response):
        resp = '!='
    query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer) WHERE anbieter.institution {} "{}" ) anbieter JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer) WHERE anbieter.institution {} "{}" ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer ORDER BY ausschreibung.meldungsnummer2; """.format(select_anbieter, resp, bidder, select_ausschreibung, resp, bidder)
    return pd.read_sql(query, connection);


# @return the full CPV code register as a DataFrame
def getCpvRegister():
    return pd.read_sql("SELECT * FROM cpv", connection);


# Build the positive and negative response DataFrames for one bidder.
# @param select_an: bidder-side SELECT column list
# @param select_aus: tender-side SELECT column list
# @param anbieter: bidder name
# @return tuple (positive_df, negative_df), both copies
def createAnbieterDf(select_an, select_aus, anbieter):
    # Create two new DFs: one containing all positive, one all the negative responses
    data_pos = getResponses(select_an, select_aus, anbieter, True)
    data_neg = getResponses(select_an, select_aus, anbieter, False)
    return data_pos.copy(), data_neg.copy()


# ========================
# MODEL CREATION FUNCTIONS
# ========================

# Train and evaluate a single decision tree on the given positive set plus a
# random negative sample.
# NOTE(review): feature/label columns are selected by position (iloc 11 and
# [1,3,7,9]), so this depends on the exact column order produced by the
# SELECTs above -- fragile if the SELECT changes.
# @param df_pos_full: all positive responses
# @param df_neg_full: all negative responses
# @param neg_sample_size: how many negatives to sample for this run
# @return tuple (pos/neg ratio, accuracy score, confusion matrix)
def decisionTreeRun(df_pos_full, df_neg_full, neg_sample_size):
    df_pos = df_pos_full
    # Create a random DF subset used to train the model on
    df_neg = df_neg_full.sample(neg_sample_size)
    # Assign pos/neg labels to both DFs
    df_pos['Y'] = 1
    df_neg['Y'] = 0
    # Merge the DFs into one
    df_appended = df_pos.append(df_neg, ignore_index=True)
    # Clean PLZ property (non-numeric postal codes become 0)
    df_appended[['anbieter_plz']] = df_appended[['anbieter_plz']].applymap(tonumeric)
    df_appended[['beschaffungsstelle_plz']] = df_appended[['beschaffungsstelle_plz']].applymap(tonumeric)
    # Shuffle the df
    df_tree = df_appended.sample(frac=1)
    # Put responses in one array and all desired properties in another
    y = df_tree.iloc[:, [11]]
    x = df_tree.iloc[:, [1, 3, 7, 9]]
    # create train/test sets
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25)
    # train the model on training sets
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(xtrain, ytrain)
    # predict on the test sets
    res = clf.predict(xtest)
    ytest["res"] = res
    ytest['richtig'] = ytest['res'] == ytest['Y']
    # Per-outcome subsets (currently unused beyond inspection).
    tp = ytest[(ytest['Y'] == 1) & (ytest['res'] == 1)]
    tn = ytest[(ytest['Y'] == 0) & (ytest['res'] == 0)]
    fp = ytest[(ytest['Y'] == 0) & (ytest['res'] == 1)]
    fn = ytest[(ytest['Y'] == 1) & (ytest['res'] == 0)]
    return len(df_pos.index) / neg_sample_size, accuracy_score(ytest.Y, res), confusion_matrix(ytest.Y, res);


# Draw several reproducible negative samples (random_state = run index).
# @param full_neg: dataframe containing all negative responses for that bidder
# @param pos_df_size: amount of data in the positive dataframe
# @param amount_neg_df: how many response_negative dataframes the function will produce
# @param pos_neg_ratio: what the ratio of positive to negative responses will be
# @return a list of negative response dataframes, each considered for one run
def createNegativeResponses(full_neg, pos_df_size, amount_neg_df, pos_neg_ratio):
    all_negatives = [];
    sample_size = math.ceil(pos_df_size * (pos_neg_ratio + 1));
    for count in range(amount_neg_df):
        all_negatives.append(full_neg.sample(sample_size, random_state=count));
    return all_negatives;


# =======================
# DATA CLEANING FUNCTIONS
# =======================

# @param val: a value to be casted to numeric
# @return a value that has been casted to an integer. Returns 0 if cast was not possible
# NOTE(review): the bare except also swallows non-cast errors; kept as-is.
def tonumeric(val):
    try:
        return int(val)
    except:
        return 0


# @param val: a string value to be categorised
def unifyYesNo(val):
    """Map a localized yes/no string to 1/0.

    Any unknown value (including None) maps to 0.  The historical comment
    promised "Yes"/"No"/"?" strings, but the function has always returned
    integers; the documentation now matches the code.
    """
    switcher = {
        'Ja': 1,
        'Sì': 1,
        'Oui': 1,
        'Nein': 0,
        'Nei': 0,  # NOTE(review): possibly a typo for 'Nein' -- kept for compatibility
        'Non': 0,
    }
    return switcher.get(val, 0)


# TODO: agree on the categories with Matthias
def createPriceCategory(val):
    """Map a procurement price to one of 16 categories.

    :param val: price; anything not castable to int counts as invalid
    :return: 0 for a zero price, 1..14 for increasing price bands,
             -1 for negative or invalid values
    """
    from bisect import bisect_left  # local import keeps module-level deps unchanged
    try:
        val = int(val)
    except (ValueError, TypeError):  # narrowed from a bare `except:`
        return -1
    if val < 0:
        return -1
    if val == 0:
        return 0
    # Inclusive upper bounds of categories 1..13; anything above the last
    # bound falls into category 14.  Replaces the long if-chain.
    bounds = [100000, 250000, 500000, 750000, 1000000,
              2500000, 5000000, 10000000, 25000000, 50000000,
              100000000, 200000000, 500000000]
    return bisect_left(bounds, val) + 1
/learn.py
import pandas as pd
import numpy as np
import math
import re
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef
from sklearn import tree
from db import connection, engine
import logging

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class ModelTrainer():
    """Trains and evaluates classifiers that predict whether a bidder
    responds to a tender, based on data queried from the Simap database."""

    # NOTE(review): mutable default argument -- the same list object is
    # shared by every instance constructed without `attributes`.
    def __init__(self, select, anbieter, config, attributes=[]):
        self.anbieter = anbieter      # bidder (institution) name
        self.select = select          # SQL SELECT column list
        self.attributes = attributes  # feature columns to train on
        self.config = config          # run/algorithm hyperparameter dict

    def run(self):
        """Full pipeline: query, preprocess, train all algorithms.

        :return: result dict per algorithm plus the duplicate rows found.
        """
        self.queryData()
        prepared_positives, prepared_negatives, duplicates = self.prepare_data()
        result = self.trainAllModels(prepared_positives, prepared_negatives)
        result['duplicates'] = duplicates.to_dict()
        return result

    def resetSQLData(self):
        """Drop the cached SQL results so the next run re-queries."""
        try:
            del self.positives
            del self.negatives
        except:
            # Attributes may not exist yet; nothing to reset then.
            pass

    def trainAllModels(self, positives, negatives):
        """Build the samples and train every enabled algorithm on them."""
        result = {
            'attributes': self.attributes,
            'anbieter': self.anbieter,
            'timestamp': datetime.now().isoformat()
        }
        samples = self.createSamples(positives, negatives)
        result = {**result, **self.trainAllAlgorithms(samples)}
        return result

    def createSamples(self, positives, negatives):
        """Create config['runs'] shuffled samples of positives plus a
        reproducible negative subsample (random_state = run index)."""
        negative_sample_size = math.ceil(len(positives) * (self.config['positive_to_negative_ratio'] + 1))
        samples = []
        for runIndex in range(self.config['runs']):
            negative_sample = negatives.sample(negative_sample_size, random_state=runIndex)
            sample = positives.append(negative_sample, ignore_index=True)
            sample.reset_index(drop=True, inplace=True)
            sample.fillna(0, inplace=True)
            sample = shuffle(sample, random_state=runIndex)
            samples.append(sample)
        return samples

    def trainAllAlgorithms(self, samples):
        """Train each algorithm in config['enabled_algorithms'] on *samples*.

        :return: dict algorithm -> {'metrics', 'data', 'metadata'}
        :raises Exception: for an unknown algorithm name
        """
        result = {}
        for algorithm in self.config['enabled_algorithms']:
            # Each branch builds a factory so every run gets a fresh
            # classifier seeded with its run index.
            if algorithm == 'random_forest':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                min_samples_split = self.config[algorithm]['min_samples_split']
                classifier = lambda randomState: RandomForestClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    min_samples_split=min_samples_split,
                    random_state=randomState,
                    n_jobs=-1
                )
            elif algorithm == 'gradient_boost':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                learning_rate = self.config[algorithm]['learning_rate']
                classifier = lambda randomState: GradientBoostingClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    learning_rate=learning_rate,
                    random_state=randomState
                )
            elif algorithm == 'decision_tree':
                # NOTE(review): unlike the other branches, randomState is not
                # forwarded here, so decision-tree runs are not seeded.
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                classifier = lambda randomState: DecisionTreeClassifier(
                    max_depth=max_depth,
                    max_features=max_features
                )
            else:
                raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm))
            result[algorithm] = {}
            x_tests, y_tests = self.trainModel(samples, classifier, algorithm)
            result[algorithm]['metrics'] = self.config[algorithm]
            evaluation_dataframe = pd.concat([self.__getConfusionMatices(y_tests), self.__getAccuracies(y_tests)], axis=1, sort=False)
            result[algorithm]['data'] = evaluation_dataframe.to_dict()
            result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe)
        return result

    def trainModel(self, samples, get_classifier, algorithm):
        """Train one classifier per sample and collect per-run predictions.

        :param samples: list of shuffled sample DataFrames (one per run)
        :param get_classifier: factory runIndex -> unfitted classifier
        :param algorithm: algorithm name (used for logging/export side effects)
        :return: (x_tests, y_tests) lists of per-run test DataFrames; y_tests
            carry 'run', 'prediction' and 'correct' columns
        """
        x_tests = []
        y_tests = []
        for runIndex, sample in enumerate(samples):
            classifier = get_classifier(runIndex)
            train, test = train_test_split(sample, random_state=runIndex)
            if 'skip_cross_val' not in self.config or not self.config['skip_cross_val']:
                # Compute cross validation (5-fold)
                scores = self.__cross_val_score(classifier, train, cv=5)
                print(scores)
                print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, runIndex, round(sum(scores)/len(scores), 4)))
            # Select all attributes
            x_test = test.drop(['Y'], axis=1)
            x_train = train.drop(['Y'], axis=1)
            # Only select the response result attributes
            y_test = test[['Y']].copy()
            y_train = train[['Y']]
            # Create the model
            # Train the model on training sets
            classifier = classifier.fit(x_train, y_train['Y'])
            # print the max_depths of all classifiers in a Random Forest
            if algorithm == 'random_forest':
                print('Random Forest Depts:', [self.dt_max_depth(t.tree_) for t in classifier.estimators_])
            # Create a file displaying the tree
            if 'draw_tree' in self.config and self.config['draw_tree'] and algorithm == 'decision_tree' and runIndex == 0:
                tree.export_graphviz(classifier, out_file='tree.dot', feature_names=x_train.columns)
            # Predict on the test sets
            prediction = classifier.predict(x_test)
            # Add run number to df
            y_test['run'] = runIndex
            x_test['run'] = runIndex
            # add prediction to df
            y_test['prediction'] = prediction
            # add result of run to df
            y_test['correct'] = y_test['prediction'] == y_test['Y']
            # add run to run arrays
            x_tests.append(x_test)
            y_tests.append(y_test)
        return x_tests, y_tests

    def queryData(self):
        """Query (and cache on the instance) positive and negative rows."""
        if not hasattr(self, 'positives') or not hasattr(self, 'negatives'):
            self.positives = self.__runSql(True)
            self.negatives = self.__runSql(False)
            logger.info('sql done')
        return self.positives, self.negatives

    # NOTE(review): self.anbieter is interpolated into the SQL -- fine for
    # trusted inputs, but open to SQL injection otherwise.
    def __runSql(self, response):
        """Fetch rows where the bidder equals (response=True) or does not
        equal (response=False) self.anbieter."""
        resp = '='
        if (not response):
            resp = '!='
        query = """SELECT {} from beruecksichtigteanbieter_zuschlag JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer WHERE anbieter.institution {} "{}" ORDER BY ausschreibung.meldungsnummer; """.format(self.select, resp, self.anbieter)
        return pd.read_sql(query, engine)

    def prepareUnfilteredRun(self, positive_sample, negative_samples):
        """Merge the positive sample with each negative sample (copies)."""
        merged_samples_for_names = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one
            merged_samples_for_names.append(positive_sample.append(negative_sample, ignore_index=True).copy())
        return merged_samples_for_names

    def __getAccuracies(self, dfys):
        """One row per run with accuracy (%), MCC and false-negative rate (%)."""
        res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate'])
        for dfy in dfys:
            acc = round(accuracy_score(dfy.Y, dfy.prediction), 4)
            # f1 = round(f1_score(dfy.Y, dfy.prediction), 4)
            mcc = matthews_corrcoef(dfy.Y, dfy.prediction)
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4)
            # add row to end of df, *100 for better % readability
            res.loc[len(res)] = [
                acc*100,
                mcc,
                fnr*100
            ]
        return res

    def __getConfusionMatices(self, dfys):
        """One row per run with the confusion-matrix cells tn/tp/fp/fn."""
        res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn'])
        for dfy in dfys:
            # ConfusionMatrix legend:
            # [tn, fp]
            # [fn, tp]
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            res.loc[len(res)] = [
                matrix[0][0],
                matrix[1][1],
                matrix[0][1],
                matrix[1][0]
            ]
        # res.loc['sum'] = res.sum()  # Summarize each column
        return res

    def __getIterationMetadata(self, df):
        """Aggregate per-run metrics into summary statistics."""
        res = {}
        res['acc_mean'] = df['accuracy'].mean()
        res['acc_median'] = df['accuracy'].median()
        res['acc_min'] = df['accuracy'].min()
        res['acc_max'] = df['accuracy'].max()
        res['acc_quantile_25'] = df['accuracy'].quantile(q=.25)
        res['acc_quantile_75'] = df['accuracy'].quantile(q=.75)
        res['mcc_mean'] = df['MCC'].mean()
        res['mcc_median'] = df['MCC'].median()
        res['mcc_min'] = df['MCC'].min()
        res['mcc_max'] = df['MCC'].max()
        res['mcc_quantile_25'] = df['MCC'].quantile(q=.25)
        res['mcc_quantile_75'] = df['MCC'].quantile(q=.75)
        res['fn_rate_mean'] = df['fn_rate'].mean()
        res['fn_rate_median'] = df['fn_rate'].median()
        res['fn_rate_min'] = df['fn_rate'].min()
        res['fn_rate_max'] = df['fn_rate'].max()
        res['fn_rate_quantile_25'] = df['fn_rate'].quantile(q=.25)
        res['fn_rate_quantile_75'] = df['fn_rate'].quantile(q=.75)
        res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean()
        return res

    def __cross_val_score(self, clf, sample, cv):
        """Hand-rolled cross validation: *cv* reseeded train/test splits.

        NOTE(review): this is repeated random subsampling, not k-fold --
        folds may overlap.
        """
        cross_val_scores = []
        for validation_run_index in range(cv):
            train, test = train_test_split(sample, random_state=validation_run_index)
            # Select all attributes but meldungsnummer
            xtest = test.drop(['Y'], axis=1)
            xtrain = train.drop(['Y'], axis=1)
            # Only select the response result attributes
            ytest = test[['Y']]
            ytrain = train[['Y']]
            clf = clf.fit(xtrain, ytrain['Y'])
            prediction = clf.predict(xtest)
            cross_val_scores.append(accuracy_score(ytest, prediction))
        return cross_val_scores

    def prepare_data(self):
        """Label, merge and preprocess the cached positive/negative rows.

        :return: (positives, negatives, duplicates) DataFrames
        """
        # NOTE(review): if a subclass defines cleanData, `duplicates` is
        # never assigned and the final return raises UnboundLocalError.
        filter_attributes = ['meldungsnummer'] + self.attributes
        # filter only specified attributes
        positives = self.positives[filter_attributes].copy()
        negatives = self.negatives[filter_attributes].copy()
        positives['Y'] = 1
        negatives['Y'] = 0
        merged = positives.append(negatives, ignore_index=True)
        if hasattr(self, 'cleanData'):
            positives = self.cleanData(positives, self.attributes)
            negatives = self.cleanData(negatives, self.attributes)
        else:
            # positives = self.preprocess_data(positives, self.attributes)
            # negatives = self.preprocess_data(negatives, self.attributes)
            merged, duplicates = self.preprocess_data(merged, self.attributes)
            positives = merged[merged['Y']==1]
            negatives = merged[merged['Y']==0]
        return positives, negatives, duplicates

    def preprocess_data(self, df, filters):
        """Encode/clean every feature named in *filters* and drop the key.

        CPV codes and postal codes are split into coarser hierarchy levels
        and one-hot encoded; categorical columns get dummy columns; yes/no
        columns become 1/0.

        :return: (df without 'meldungsnummer', duplicate-key rows)
        """
        df = df.copy()
        # drop duplicates before starting to preprocess
        df = df.drop_duplicates()
        if 'ausschreibung_cpv' in filters:
            # CPV hierarchy levels derived by integer division.
            split = {
                'division': lambda x: math.floor(x/1000000),
                'group': lambda x: math.floor(x/100000),
                'class': lambda x: math.floor(x/10000),
                'category': lambda x: math.floor(x/1000)
            }
            for key, applyFun in split.items():
                df['cpv_' + key ] = df['ausschreibung_cpv'].apply(applyFun)
            tmpdf = {}
            for key in split.keys():
                key = 'cpv_' + key
                # One-hot per level, aggregated to one row per meldungsnummer.
                tmpdf[key] = df[['meldungsnummer']].join(pd.get_dummies(df[key], prefix=key)).groupby('meldungsnummer').max()
            encoded_df = pd.concat([tmpdf['cpv_'+ key] for key in split.keys()], axis=1)
            df = df.drop(['cpv_' + key for key, fun in split.items()], axis=1)
            df = df.drop(['ausschreibung_cpv'], axis=1)
            df = df.drop_duplicates()
            df = df.join(encoded_df, on='meldungsnummer')
        if 'gatt_wto' in filters:
            df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo)
        if 'anzahl_angebote' in filters:
            df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric)
        if 'teilangebote' in filters:
            df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo)
        if 'lose' in filters:
            df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNoOrInt)
        if 'varianten' in filters:
            df[['varianten']] = df[['varianten']].applymap(ModelTrainer.unifyYesNo)
        if 'auftragsart_art' in filters:
            auftrags_art_df = pd.get_dummies(df['auftragsart_art'], prefix='aftrgsrt', dummy_na=True)
            df = pd.concat([df,auftrags_art_df],axis=1).drop(['auftragsart_art'], axis=1)
        if 'sprache' in filters:
            sprache_df = pd.get_dummies(df['sprache'], prefix='lang', dummy_na=True)
            df = pd.concat([df,sprache_df],axis=1).drop(['sprache'], axis=1)
        if 'auftragsart' in filters:
            auftragsart_df = pd.get_dummies(df['auftragsart'], prefix='auftr', dummy_na=True)
            df = pd.concat([df,auftragsart_df],axis=1).drop(['auftragsart'], axis=1)
        if 'beschaffungsstelle_plz' in filters:
            # plz_df = pd.get_dummies(df['beschaffungsstelle_plz'], prefix='beschaffung_plz', dummy_na=True)
            # df = pd.concat([df,plz_df],axis=1).drop(['beschaffungsstelle_plz'], axis=1)
            df['beschaffungsstelle_plz'] = df['beschaffungsstelle_plz'].apply(ModelTrainer.transformToSingleInt)
            # Postal-code hierarchy (district/area) by integer division;
            # NaN (invalid PLZ) is propagated.
            split = {
                'district': lambda x: math.floor(x/1000) if not math.isnan(x) else x,
                'area': lambda x: math.floor(x/100) if not math.isnan(x) else x,
            }
            prefix = 'b_plz_'
            for key, applyFun in split.items():
                df[prefix + key] = df['beschaffungsstelle_plz'].apply(applyFun)
            df.rename(columns={'beschaffungsstelle_plz': prefix + 'ganz'}, inplace=True)
            for key in ['ganz'] + list(split.keys()):
                key = prefix + key
                df = pd.concat([df, pd.get_dummies(df[key], prefix=key, dummy_na=True)], axis=1).drop(key, axis=1)
        df.drop_duplicates(inplace=True)
        if any(df.duplicated(['meldungsnummer'])):
            logger.warning("duplicated meldungsnummer")
        duplicates = df[df.duplicated(['meldungsnummer'])]
        df = df.drop(['meldungsnummer'], axis=1)
        return df, duplicates

    def dt_max_depth(self, tree):
        """Return the depth of an sklearn Tree (leaf-only trees count as 1)."""
        n_nodes = tree.node_count
        children_left = tree.children_left
        children_right = tree.children_right

        # Recursive walk: left == right child id marks a leaf in sklearn's
        # tree arrays.
        def walk(node_id):
            if (children_left[node_id] != children_right[node_id]):
                left_max = 1 + walk(children_left[node_id])
                right_max = 1 + walk(children_right[node_id])
                return max(left_max, right_max)
            else:  # is leaf
                return 1

        root_node_id = 0
        return walk(root_node_id)

    # @param val: a value to be casted to numeric
    # @return the value cast to int, or 0 if the cast was not possible
    # NOTE(review): missing @staticmethod; works when called via the class
    # (ModelTrainer.tonumeric) but not on an instance.
    def tonumeric(val):
        try:
            return int(val)
        except:
            return 0

    # @param val: a string value to be categorised
    # @return 1 for yes-variants, 0 for no-variants and anything unknown
    @staticmethod
    def unifyYesNo(val):
        switcher = {
            'Ja': 1,
            'Sì': 1,
            'Oui': 1,
            'YES': 1,
            'Nein': 0,
            'Nei': 0,
            'Non': 0,
            'NO': 0,
        }
        return switcher.get(val, 0)

    @staticmethod
    def unifyYesNoOrInt(val):
        """Cast to int when possible, otherwise fall back to yes/no mapping."""
        try:
            return int(val)
        except ValueError:
            return ModelTrainer.unifyYesNo(val)

    @staticmethod
    def transformToSingleInt(plz):
        """Extract a 4-digit Swiss postal code from *plz*, else NaN."""
        try:
            result = int(plz)
        except ValueError:
            try:
                # Fall back to the first 4-digit group inside the string.
                result = int(re.search(r"\d{4}", plz).group())
            except AttributeError:
                return np.nan
        return result if result >= 1000 and result <= 9999 else np.nan
/runIterations.py
"""Batch driver: trains models for every configured supplier (Anbieter) and
attribute combination, persisting each run's result to a JSON collection."""
from learn import ModelTrainer
from collection import Collection
import pandas as pd
import logging
import traceback
import os

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# === THESIS ===
# Suppliers grouped by sector; the group label is attached to every result.
anbieter_config = {
    'Construction': [
        'Alpiq AG',
        'KIBAG',
        'Egli AG',
    ],
    'IT': [
        'Swisscom',
        'ELCA Informatik AG',
        'Unisys',
    ],
    'Other': [
        'Kummler + Matter AG',
        'Thermo Fisher Scientific (Schweiz) AG',
        'AXA Versicherung AG',
    ],
    'Diverse': [
        'Siemens AG',
        'ABB',
        'Basler & Hofmann West AG',
    ]
}

# === TESTING ===
# Single-supplier candidates used during development (trailing number is the
# count of matching awards).
#anbieter = 'Marti AG' #456
#anbieter = 'Axpo AG' #40
#anbieter = 'Hewlett-Packard' #90
#anbieter = 'BG Ingénieurs Conseils' SA #116
#anbieter = 'Pricewaterhousecoopers' #42
#anbieter = 'Helbling Beratung + Bauplanung AG' #20
#anbieter = 'Ofrex SA' #52
#anbieter = 'PENTAG Informatik AG' #10
#anbieter = 'Wicki Forst AG' #12
#anbieter = 'T-Systems Schweiz' #18
#anbieter = 'Bafilco AG' #20
#anbieter = '4Video-Production GmbH' #3
#anbieter = 'Widmer Ingenieure AG' #6
#anbieter = 'hmb partners AG' #2
#anbieter = 'Planmeca' #4
#anbieter = 'K & M Installationen AG' #4

# Column list passed verbatim into the trainer's SQL SELECT.
select = (
    "ausschreibung.meldungsnummer, "
    "anbieter.institution as anbieter_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "ausschreibung.sprache, "
    "ausschreibung.auftragsart, "
    "ausschreibung.auftragsart_art, "
    "ausschreibung.lose, "
    "ausschreibung.teilangebote, "
    "ausschreibung.varianten, "
    "ausschreibung.bietergemeinschaft, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv"
)

# Feature columns the models may be trained on (order matters for run(),
# which uses growing prefixes of this list).
attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
#attributes = ['auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'ausschreibung_cpv', 'gatt_wto','teilangebote', 'sprache']
#attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
# Alternative shape consumed by runAttributesList(): a list of lists.
# attributes = [
#     [ 'ausschreibung_cpv', 'auftragsart_art' ],
#     [ 'ausschreibung_cpv', 'beschaffungsstelle_plz' ],
#     [ 'ausschreibung_cpv', 'auftragsart' ],
#     [ 'ausschreibung_cpv', 'gatt_wto' ],
#     [ 'ausschreibung_cpv', 'lose' ],
#     [ 'ausschreibung_cpv', 'teilangebote' ],
#     [ 'ausschreibung_cpv', 'varianten' ],
#     [ 'ausschreibung_cpv', 'sprache' ]
# ]

config = {
    # ratio that the positive and negative responses have to each other
    'positive_to_negative_ratio': 0.5,
    # Percentage of training set that is used for testing (Recommendation of at least 25%)
    'test_size': 0.25,
    # Number of resampled negative sets (and therefore training runs) per model.
    'runs': 100,
    #'enabled_algorithms': ['random_forest'],
    'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
    'random_forest': {
        # Tune Random Forest Parameter
        'n_estimators': 100,
        'max_features': 'sqrt',
        'max_depth': None,
        'min_samples_split': 4
    },
    'decision_tree': {
        'max_depth': 30,
        'max_features': 'sqrt',
        'min_samples_split': 4
    },
    'gradient_boost': {
        'n_estimators': 100,
        'learning_rate': 0.1,
        'max_depth': 30,
        'min_samples_split': 4,
        'max_features': 'sqrt'
    }
}


class IterationRunner():
    """Drives ModelTrainer over every supplier/attribute combination and
    appends each run's output to a Collection persisted on disk."""

    def __init__(self, anbieter_config, select, attributes, config):
        self.anbieter_config = anbieter_config
        self.select = select
        self.attributes = attributes
        self.config = config
        # One trainer is reused for all runs; singleRun() mutates its
        # `anbieter` and `attributes` fields before each call.
        self.trainer = ModelTrainer(select, '', config, attributes)
        self.collection = Collection()

    def run(self):
        """Train with growing attribute prefixes: [a1], [a1,a2], ... per supplier."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr_id in range(len(self.attributes)):
                    att_list = self.attributes[:attr_id+1]
                    self.singleRun(anbieter, att_list, label)
                # Invalidate the cached SQL result before the next supplier.
                self.trainer.resetSQLData()

    def runAttributesEachOne(self):
        """Train once per single attribute, for each supplier."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr in self.attributes:
                    att_list = [attr]
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runAttributesList(self):
        """Train once per pre-built attribute list.

        Expects `self.attributes` to be a list of lists (see the commented
        alternative `attributes` definition above).
        """
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for att_list in self.attributes:
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runSimpleAttributeList(self):
        """Train once per supplier using the full attribute list."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                self.singleRun(anbieter, self.attributes, label)
                self.trainer.resetSQLData()

    def singleRun(self, anbieter, att_list, label):
        """Run the trainer once and persist the result; errors are logged and
        swallowed so a failing combination does not abort the batch."""
        logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
        try:
            self.trainer.attributes = att_list
            self.trainer.anbieter = anbieter
            output = self.trainer.run()
            output['label'] = label
            self.collection.append(output)
            # Re-serialise the whole collection after every run so progress
            # survives a crash. Target file defaults to dbs/auto.json.
            filename = os.getenv('DB_FILE', 'dbs/auto.json')
            self.collection.to_file(filename)
        except Exception as e:
            traceback.print_exc()
            print(e)
        print('one it done')


runner = IterationRunner(anbieter_config, select, attributes, config)

if __name__ == '__main__':
    # runner.collection.import_file('dbs/auto.json')
    runner.run()
    runner.runAttributesEachOne()
    runner.runAttributesList()
    # label, anbieters = next(iter(runner.anbieter_config.items()))
    # print(label)
/runOldIterations.py
"""Legacy batch driver for the older train.ModelTrainer interface (separate
anbieter/ausschreibung selects plus an external cleanData callback)."""
from train import ModelTrainer
from collection import Collection
import pandas as pd
import logging
import traceback
import os

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# === THESIS ===
# Suppliers grouped by sector; the group label is attached to every result.
anbieter_config = {
    'Construction': [
        'Alpiq AG',
        'Swisscom',
        'Kummler + Matter AG',
        'Siemens AG'
    ],
    'IT': [
        'G. Baumgartner AG',
        'ELCA Informatik AG',
        'Thermo Fisher Scientific (Schweiz) AG',
        'Arnold AG',
    ],
    'Other': [
        'Riget AG',
        'isolutions AG',
        'CSI Consulting AG',
        'Aebi & Co. AG Maschinenfabrik',
    ],
    'Divers': [
        'DB Schenker AG',
        'IT-Logix AG',
        'AVS Syteme AG',
        'Sajet SA'
    ]
}

# === TESTING ===
# Single-supplier candidates used during development (trailing number is the
# count of matching awards).
#anbieter = 'Marti AG' #456
#anbieter = 'Axpo AG' #40
#anbieter = 'Hewlett-Packard' #90
#anbieter = 'BG Ingénieurs Conseils' SA #116
#anbieter = 'Pricewaterhousecoopers' #42
#anbieter = 'Helbling Beratung + Bauplanung AG' #20
#anbieter = 'Ofrex SA' #52
#anbieter = 'PENTAG Informatik AG' #10
#anbieter = 'Wicki Forst AG' #12
#anbieter = 'T-Systems Schweiz' #18
#anbieter = 'Bafilco AG' #20
#anbieter = '4Video-Production GmbH' #3
#anbieter = 'Widmer Ingenieure AG' #6
#anbieter = 'hmb partners AG' #2
#anbieter = 'Planmeca' #4
#anbieter = 'K & M Installationen AG' #4

# Columns describing the supplier side of the join.
select_anbieter = (
    "anbieter.anbieter_id, "
    "anbieter.institution as anbieter_institution, "
    "cpv_dokument.cpv_nummer as anbieter_cpv, "
    "ausschreibung.meldungsnummer"
)
# anbieter_CPV are all the CPVs the Anbieter ever won a procurement for. So all the CPVs they are interested in.

# Columns describing the tender side of the join.
select_ausschreibung = (
    "anbieter.anbieter_id, "
    "auftraggeber.institution as beschaffungsstelle_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "ausschreibung.sprache, "
    "ausschreibung.auftragsart_art, "
    "ausschreibung.lose, "
    "ausschreibung.teilangebote, "
    "ausschreibung.varianten, "
    "ausschreibung.projekt_id, "
    # "ausschreibung.titel, "
    "ausschreibung.bietergemeinschaft, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv, "
    "ausschreibung.meldungsnummer as meldungsnummer2"
)

attributes = ['ausschreibung_cpv', 'auftragsart_art','beschaffungsstelle_plz','gatt_wto','lose','teilangebote', 'varianten','sprache']
# attributes = ['auftragsart_art']

config = {
    # ratio that the positive and negative responses have to each other
    'positive_to_negative_ratio': 0.5,
    # Percentage of training set that is used for testing (Recommendation of at least 25%)
    'test_size': 0.25,
    # Number of resampled negative sets (and therefore training runs) per model.
    'runs': 100,
    #'enabled_algorithms': ['random_forest'],
    'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
    'random_forest': {
        # Tune Random Forest Parameter
        'n_estimators': 100,
        'max_features': 'sqrt',
        'max_depth': None,
        'min_samples_split': 2
    },
    'decision_tree': {
        'max_depth': 15,
        'max_features': 'sqrt'
    },
    'gradient_boost': {
        'n_estimators': 100,
        'learning_rate': 0.1,
        'max_depth': 15,
        'max_features': 'sqrt'
    }
}


# Prepare Attributes
def cleanData(df, filters):
    """Clean/encode the selected attribute columns in place and return df.

    `filters` is the list of attribute names in use; only those columns are
    touched. Yes/no columns become 0/1 ints, categorical columns become
    dummy-encoded columns (NaN included via dummy_na=True).
    """
    # if 'beschaffungsstelle_plz' in filters:
    #     df[['beschaffungsstelle_plz']] = df[['beschaffungsstelle_plz']].applymap(ModelTrainer.tonumeric)
    if 'gatt_wto' in filters:
        df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo)
    if 'anzahl_angebote' in filters:
        df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric)
    if 'teilangebote' in filters:
        df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo)
    if 'lose' in filters:
        # NOTE(review): the newer pipeline maps 'lose' with unifyYesNoOrInt
        # (ints pass through); here integer lose counts collapse to 0.
        df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNo)
    if 'varianten' in filters:
        df[['varianten']] = df[['varianten']].applymap(ModelTrainer.unifyYesNo)
    if 'auftragsart_art' in filters:
        auftrags_art_df = pd.get_dummies(df['auftragsart_art'], prefix='aftrgsrt',dummy_na=True)
        df = pd.concat([df,auftrags_art_df],axis=1).drop(['auftragsart_art'],axis=1)
    if 'sprache' in filters:
        sprache_df = pd.get_dummies(df['sprache'], prefix='lang',dummy_na=True)
        df = pd.concat([df,sprache_df],axis=1).drop(['sprache'],axis=1)
    if 'auftragsart' in filters:
        auftragsart_df = pd.get_dummies(df['auftragsart'], prefix='auftr',dummy_na=True)
        df = pd.concat([df,auftragsart_df],axis=1).drop(['auftragsart'],axis=1)
    if 'beschaffungsstelle_plz' in filters:
        # Raw postal codes are dummy-encoded directly (one column per code).
        plz_df = pd.get_dummies(df['beschaffungsstelle_plz'], prefix='beschaffung_plz',dummy_na=True)
        df = pd.concat([df,plz_df],axis=1).drop(['beschaffungsstelle_plz'],axis=1)
    return df


class IterationRunner():
    """Drives the legacy ModelTrainer over every supplier/attribute combo and
    appends each run's output to a Collection persisted on disk."""

    def __init__(self, anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData):
        self.anbieter_config = anbieter_config
        self.select_anbieter = select_anbieter
        self.select_ausschreibung = select_ausschreibung
        self.attributes = attributes
        self.config = config
        self.cleanData = cleanData
        # One trainer is reused; singleRun() mutates anbieter/attributes.
        self.trainer = ModelTrainer(select_anbieter, select_ausschreibung, '', config, cleanData, attributes)
        self.collection = Collection()

    def run(self):
        """Train with growing attribute prefixes per supplier.

        NOTE(review): `range(len(self.attributes)-1)` stops one short, so the
        full attribute list is never run here — the newer runIterations.py
        uses `range(len(self.attributes))`. Confirm whether this is intended.
        """
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr_id in range(len(self.attributes)-1):
                    att_list = self.attributes[:attr_id+1]
                    self.singleRun(anbieter, att_list, label)
                # Invalidate the cached SQL result before the next supplier.
                self.trainer.resetSQLData()

    def runAttributesEachOne(self):
        """Train once per single attribute, for each supplier."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr in self.attributes:
                    att_list = [attr]
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runSimpleAttributeList(self):
        """Train once per supplier using the full attribute list."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                self.singleRun(anbieter, self.attributes, label)
                self.trainer.resetSQLData()

    def singleRun(self, anbieter, att_list, label):
        """Run the trainer once and persist the result; errors are logged and
        swallowed so a failing combination does not abort the batch."""
        logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
        try:
            self.trainer.attributes = att_list
            self.trainer.anbieter = anbieter
            output = self.trainer.run()
            output['label'] = label
            self.collection.append(output)
            # Re-serialise the whole collection after every run so progress
            # survives a crash. Target file defaults to dbs/auto.json.
            filename = os.getenv('DB_FILE', 'dbs/auto.json')
            self.collection.to_file(filename)
        except Exception as e:
            traceback.print_exc()
            print(e)
        print('one it done')


runner = IterationRunner(anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData)

if __name__ == '__main__':
    # runner.collection.import_file('dbs/auto.json')
    runner.run()
    runner.runAttributesEachOne()
    # label, anbieters = next(iter(runner.anbieter_config.items()))
    # print(label)
/train.py
"""Legacy ModelTrainer: samples positive/negative award data from SQL, trains
the configured classifiers over many resampled runs and aggregates metrics."""
import pandas as pd
import math
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef
from db import connection, engine
import logging

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class ModelTrainer():
    """Trains binary classifiers that predict whether a tender belongs to a
    given supplier (`anbieter`). Positives are that supplier's awards;
    negatives are sampled from every other supplier's awards."""

    # NOTE(review): mutable default argument `attributes=[]` is shared across
    # instances; safe only because callers always pass their own list.
    def __init__(self, select_anbieter, select_ausschreibung, anbieter, config, cleanData, attributes=[]):
        self.anbieter = anbieter
        self.select_anbieter = select_anbieter
        self.select_ausschreibung = select_ausschreibung
        self.attributes = attributes
        self.config = config
        # Callback(df, filters) -> df used to clean/encode attribute columns.
        self.cleanData = cleanData

    def run(self):
        """Execute the full pipeline and return the aggregated result dict."""
        positive_sample, negative_samples = self.createSamples()
        positive_and_negative_samples = self.prepareForRun(
            positive_sample,
            negative_samples
        )
        # most certainly used to resolve the naming functions like getFalseProjectTitle
        # NOTE(review): computed but never consumed below.
        merged_samples_for_names = self.prepareUnfilteredRun(
            positive_sample,
            negative_samples
        )
        result = self.trainSpecifiedModels(positive_and_negative_samples)
        return result
        # xTests, yTests = self.trainModel(positive_and_negative_samples)

    def resetSQLData(self):
        """Drop the cached query results so the next run re-queries SQL."""
        try:
            del self.positives
            del self.negatives
        except:
            # Deliberate best-effort: attributes may not exist yet.
            pass

    def createSamples(self):
        """Return (positives, [negative_sample, ...]) with Y labels attached.

        One negative sample is drawn per configured run, each with a distinct
        deterministic random_state so runs are reproducible. Negative sample
        size is len(positives) * (ratio + 1), rounded up.
        """
        if not hasattr(self, 'positives') or not hasattr(self, 'negatives'):
            self.queryData()
        negative_samples = []
        negative_sample_size = math.ceil(len(self.positives) * (self.config['positive_to_negative_ratio'] + 1))
        for count in range(self.config['runs']):
            negative_samples.append(self.negatives.sample(negative_sample_size, random_state=count))
        self.positives['Y'] = 1
        for negative_sample in negative_samples:
            negative_sample['Y'] = 0
        return (self.positives, negative_samples)

    def queryData(self):
        """Load and cache the positive and negative rows from the database."""
        self.positives = self.__runSql(True)
        self.negatives = self.__runSql(False)
        logger.info('sql done')
        return self.positives, self.negatives

    def __runSql(self, response):
        """Run the big award/tender join; `response` selects the supplier
        match operator (= for positives, != for negatives).

        NOTE(review): select lists and the supplier name are interpolated via
        str.format, not bound parameters — acceptable only because all values
        come from trusted in-repo config, but this is SQL-injection-prone.
        """
        resp = '='
        if (not response):
            resp = '!='
        query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer) WHERE anbieter.institution {} "{}" ) anbieter JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer) WHERE anbieter.institution {} "{}" ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer ORDER BY ausschreibung.meldungsnummer2; """.format(self.select_anbieter, resp, self.anbieter, self.select_ausschreibung, resp, self.anbieter)
        return pd.read_sql(query, engine)

    def prepareForRun(self, positive_sample, negative_samples):
        """Merge positives with each negative sample, keep only the selected
        attributes (plus Y and projekt_id) and clean them."""
        # What attributes the model will be trained by
        filters = ['Y', 'projekt_id'] + self.attributes
        positive_and_negative_samples = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one, only use selected attributes
            merged_samples = positive_sample.append(negative_sample, ignore_index=True)[filters].copy()
            # Clean the data of all selected attributes
            cleaned_merged_samples = self.cleanData(merged_samples, self.attributes)
            positive_and_negative_samples.append(cleaned_merged_samples)
        return positive_and_negative_samples

    def prepareUnfilteredRun(self, positive_sample, negative_samples):
        """Same merge as prepareForRun but with all columns and no cleaning."""
        merged_samples_for_names = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one
            merged_samples_for_names.append(positive_sample.append(negative_sample, ignore_index=True).copy())
        return merged_samples_for_names

    def trainSpecifiedModels(self, positive_and_negative_samples):
        """Train every enabled algorithm and collect per-algorithm metrics.

        Returns a dict with one key per algorithm (metrics/data/metadata)
        plus top-level attributes/anbieter/timestamp.
        """
        result = {}
        for algorithm in self.config['enabled_algorithms']:
            # Each branch builds a factory so every run gets a fresh model
            # seeded with that run's index.
            if algorithm == 'random_forest':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                min_samples_split = self.config[algorithm]['min_samples_split']
                classifier = lambda randomState: RandomForestClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    min_samples_split=min_samples_split,
                    random_state=randomState,
                    n_jobs=-1
                )
            elif algorithm == 'gradient_boost':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                learning_rate = self.config[algorithm]['learning_rate']
                classifier = lambda randomState: GradientBoostingClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    learning_rate=learning_rate,
                    random_state=randomState
                )
            elif algorithm == 'decision_tree':
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                # NOTE(review): randomState is accepted but not forwarded, so
                # decision-tree runs are not seeded like the other models.
                classifier = lambda randomState: DecisionTreeClassifier(
                    max_depth=max_depth,
                    max_features=max_features
                )
            else:
                raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm))
            result[algorithm] = {}
            xTests, yTests = self.trainModel(positive_and_negative_samples, classifier, algorithm)
            # Top-level keys are rewritten each iteration with the same values.
            result['attributes'] = self.attributes
            result['anbieter'] = self.anbieter
            result['timestamp'] = datetime.now().isoformat()
            #result[algorithm]['xTests'] = xTests
            #result[algorithm]['yTests'] = yTests
            result[algorithm]['metrics'] = self.config[algorithm]
            evaluation_dataframe = pd.concat([self.__getConfusionMatices(yTests), self.__getAccuracies(yTests)], axis=1, sort=False)
            result[algorithm]['data'] = evaluation_dataframe.to_dict()
            result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe)
        return result

    def trainModel(self, positive_and_negative_samples, classifier, algorithm):
        """Fit and evaluate one model per merged sample.

        Returns (xTests, yTests): the per-run feature frames and per-run
        prediction frames (Y, run, prediction, correct).
        """
        xTests = []
        yTests = []
        for idx, df in enumerate(positive_and_negative_samples): # enum to get index
            x_and_y_test, x_and_y_train = self.unique_train_and_test_split(df, random_state=idx)
            # Select all attributes
            xtest = x_and_y_test.drop(['Y'], axis=1)
            xtrain = x_and_y_train.drop(['Y'], axis=1)
            # Only select the response result attributes
            ytest = x_and_y_test['Y']
            ytrain = x_and_y_train['Y']
            # Create the model
            clf = classifier(randomState=idx)
            # Compute cross validation (5-fold)
            # NOTE(review): CV is computed on the *test* split only, before
            # projekt_id is dropped — purely informational console output.
            scores = self.__cross_val_score(clf, xtest, ytest, cv=5)
            print(scores)
            print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, idx, round(sum(scores)/len(scores), 4)))
            # projekt_id was only needed for the leakage-free split above.
            xtest = xtest.drop(['projekt_id'], axis=1)
            xtrain = xtrain.drop(['projekt_id'], axis=1)
            # Train the model on training sets
            clf = clf.fit(xtrain, ytrain)
            # Predict on the test sets
            prediction = clf.predict(xtest)
            # Convert pandas.series to data frame
            df_ytest = ytest.to_frame()
            # Add run number to df
            df_ytest['run'] = idx
            xtest['run'] = idx
            # add prediction to df
            df_ytest['prediction'] = prediction
            # add result of run to df
            df_ytest['correct'] = df_ytest['prediction'] == df_ytest['Y']
            # add run to run arrays
            xTests.append(xtest)
            yTests.append(df_ytest)
        return xTests, yTests

    def __getAccuracies(self, dfys):
        """One row per run: accuracy %, MCC and false-negative rate %."""
        res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate'])
        for dfy in dfys:
            acc = round(accuracy_score(dfy.Y, dfy.prediction), 4)
            # f1 = round(f1_score(dfy.Y, dfy.prediction), 4)
            mcc = matthews_corrcoef(dfy.Y, dfy.prediction)
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            # fn / (tp + fn): share of actual positives that were missed.
            fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4)
            # add row to end of df, *100 for better % readability
            res.loc[len(res)] = [
                acc*100,
                mcc,
                fnr*100
            ]
        return res

    def __getConfusionMatices(self, dfys):
        """One row per run with tn/tp/fp/fn counts."""
        res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn'])
        for dfy in dfys:
            # ConfusionMatrix legende:
            # [tn, fp]
            # [fn, tp]
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            res.loc[len(res)] = [
                matrix[0][0],
                matrix[1][1],
                matrix[0][1],
                matrix[1][0]
            ]
        # res.loc['sum'] = res.sum() # Summarize each column
        return res

    def __getIterationMetadata(self, df):
        """Summary statistics (mean/median/min/max/quartiles) over all runs."""
        res = {}
        res['acc_mean'] = df['accuracy'].mean()
        res['acc_median'] = df['accuracy'].median()
        res['acc_min'] = df['accuracy'].min()
        res['acc_max'] = df['accuracy'].max()
        res['acc_quantile_25'] = df['accuracy'].quantile(q=.25)
        res['acc_quantile_75'] = df['accuracy'].quantile(q=.75)
        res['mcc_mean'] = df['MCC'].mean()
        res['mcc_median'] = df['MCC'].median()
        res['mcc_min'] = df['MCC'].min()
        res['mcc_max'] = df['MCC'].max()
        res['mcc_quantile_25'] = df['MCC'].quantile(q=.25)
        res['mcc_quantile_75'] = df['MCC'].quantile(q=.75)
        res['fn_rate_mean'] = df['fn_rate'].mean()
        res['fn_rate_median'] = df['fn_rate'].median()
        res['fn_rate_min'] = df['fn_rate'].min()
        res['fn_rate_max'] = df['fn_rate'].max()
        res['fn_rate_quantile_25'] = df['fn_rate'].quantile(q=.25)
        res['fn_rate_quantile_75'] = df['fn_rate'].quantile(q=.75)
        res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean()
        return res

    def __cross_val_score(self, clf, x_values, y_values, cv):
        """Hand-rolled CV that reuses the projekt_id-aware split so rows of
        the same project never land in both folds of a validation run."""
        x_and_y_values = pd.concat([y_values, x_values], axis=1)
        cross_val_scores = []
        for validation_run_index in range(cv):
            x_and_y_test, x_and_y_train = self.unique_train_and_test_split(x_and_y_values, random_state=validation_run_index)
            # Select all attributes but meldungsnummer
            xtest = x_and_y_test.drop(['projekt_id', 'Y'], axis=1)
            xtrain = x_and_y_train.drop(['projekt_id', 'Y'], axis=1)
            # Only select the response result attributes
            ytest = x_and_y_test['Y']
            ytrain = x_and_y_train['Y']
            clf = clf.fit(xtrain, ytrain)
            prediction = clf.predict(xtest)
            cross_val_scores.append(accuracy_score(ytest, prediction))
        return cross_val_scores

    def unique_train_and_test_split(self, df, random_state):
        """Split df into (test, train) so all rows of a projekt_id stay on
        one side — prevents leakage of a project across the split."""
        run = shuffle(df, random_state=random_state) # run index as random state
        # Get each runs unique meldungsnummer
        unique_mn = run.projekt_id.unique()
        # Split the meldungsnummer between test and trainings set so there will be no bias in test set
        x_unique_test, x_unique_train = train_test_split(unique_mn, test_size=self.config['test_size'], random_state=random_state)
        # Add the remaining attributes to meldungsnummer
        x_and_y_test = run[run['projekt_id'].isin(x_unique_test)].copy()
        x_and_y_train = run[run['projekt_id'].isin(x_unique_train)].copy()
        return x_and_y_test, x_and_y_train

    # @param val: a value to be casted to numeric
    # @return a value that has been casted to an integer. Returns 0 if cast was not possible
    def tonumeric(val):
        try:
            return int(val)
        except:
            # Deliberate best-effort cast: any failure maps to 0.
            return 0

    # @param val: a string value to be categorised
    # @return uniffied gatt_wto resulting in either "Yes", "No" or "?"
    def unifyYesNo(val):
        # Multilingual (de/it/fr) yes/no normalisation; unknown values map to 0.
        switcher = {
            'Ja': 1,
            'Sì': 1,
            'Oui': 1,
            'Nein': 0,
            'Nei': 0,
            'Non': 0,
        }
        return switcher.get(val, 0)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
badgerlordy/smash-bros-reader
refs/heads/master
{"/smash_reader/smash.py": ["/smash_reader/logger.py"], "/smash_reader/smash_game.py": ["/smash_reader/logger.py"], "/smash_reader/smash_utility.py": ["/smash_reader/logger.py"], "/smash_reader/smash_watcher.py": ["/smash_reader/logger.py"]}
└── └── smash_reader ├── flags.py ├── logger.py ├── smash.py ├── smash_game.py ├── smash_utility.py ├── smash_watcher.py └── tests.py
/smash_reader/flags.py
"""Watches the screen for the Smash lobby flag-selection screen and archives
any player flag images it has not seen before under flags/<group>/.

Fixes in this revision:
  * image_similarity no longer calls bw1.show()/bw2.show() — debug leftovers
    that opened an external image viewer on every comparison.
  * read_flag no longer computes an unused timestamped filename.
  * the per-pixel Python counting loop in image_similarity is replaced by an
    equivalent vectorized numpy count.
"""
import cv2
import datetime
import numpy as np
import os
#import pytesseract as pyt
import time
from datetime import datetime
from PIL import Image, ImageGrab, ImageDraw, ImageChops

# Screen regions as (left, top, right, bottom) pixel boxes.
COORDS = {
    # Region that identifies the flag-selection screen itself.
    'lobby-flag-screen-id': (379, 281, 1534, 445),
    # One-pixel-high strip holding the per-player markers.
    'lobby-flag-screen-player-markers': (70, 820, 1800, 821),
    # One tuple of flag rectangles per player count (1..4 players).
    'flag-areas': (
        [(763, 528, 1156, 792)],
        [(472, 531, 857, 788), (1062, 531, 1447, 788)],
        [(327, 531, 682, 768), (782, 531, 1137, 768), (1237, 531, 1592, 768)],
        [(273, 540, 582, 745), (627, 540, 936, 745), (981, 540, 1290, 745), (1335, 540, 1644, 745)]
    )
}

HOME_DIR = os.path.dirname(os.path.realpath(__file__))
FLAG_DIR = os.path.join(HOME_DIR, 'flags')


###########################################################
########################### Main ##########################
###########################################################


def main():
    """Poll the screen; when the flag screen appears, capture each player's
    flag and store any flag not already known in flags/."""
    print('Starting')
    flags_dir = os.path.join(HOME_DIR, 'flags')
    if not os.path.isdir(flags_dir):
        os.mkdir(flags_dir)
    # flag_list[group_index] -> list of filenames already saved in that group.
    flag_list = []
    for root, dirs, files in os.walk(flags_dir):
        for name in files:
            # Group directories are named by their numeric index.
            folder_index = int(os.path.split(root)[1])
            if folder_index == len(flag_list):
                flag_list.append([name])
            else:
                flag_list[folder_index].append(name)
    cooldown = 0
    notif = False
    while True:
        if cooldown:
            # Back off after a detection so the same screen isn't re-read.
            cooldown -= 1
            time.sleep(1)
        elif is_flag_screen():
            notif = False
            print('Flag screen detected')
            img = ImageGrab.grab()
            img.save(os.path.join(HOME_DIR, 'screen.jpg'))
            flags = []
            cooldown = 20
            count = count_markers()
            if count > 0:
                count -= 1  # marker count -> index into 'flag-areas'
                flag_areas = COORDS['flag-areas'][count]
                for i, area in enumerate(flag_areas):
                    flag = read_flag(i, area)
                    # Keep only flags that differ from the ones captured so
                    # far on this screen.
                    if not flags:
                        flags.append(flag)
                    else:
                        if not any([image_similarity(flag, _flag) for _flag in flags]):
                            flags.append(flag)
                for flag in flags:
                    name = new_flag(flag, flag_list)
                    if name:
                        print(f'New flag: {name}')
        else:
            if not notif:
                print('Waiting for flag screen')
                notif = True
            time.sleep(0.01)
            # NOTE(review): this break exits the watch loop on the first
            # non-flag frame; kept as in the original (looks like a
            # debugging leftover) — confirm intent before removing.
            break


###########################################################
######################### Utility #########################
###########################################################


def time_this(func):
    """Decorator: print how long the wrapped call took."""
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        duration = '{:.2f}'.format(end_time - start_time)
        print(f'function: {func.__name__} executed in {duration} seconds')
        return result
    return wrapper


def new_flag(flag, flag_list):
    """Save `flag` if it is unknown; return its relative name, else None.

    Flags are grouped by visual similarity: each group is a numbered folder
    whose files are size variants (named WxH.tif) of the same flag.
    Updates `flag_list` in place.
    """
    size = flag.size
    size_str = f'{size[0]}x{size[1]}'
    name = f'{size_str}.tif'
    if flag_list:
        for i, group in enumerate(flag_list):
            path = os.path.join(FLAG_DIR, str(i))
            # Compare against the group's first stored variant.
            _flag = Image.open(os.path.join(path, group[0]))
            if image_similarity(_flag, flag):
                if name in group:
                    # Same flag at the same size already stored.
                    return None
                else:
                    # Known flag, new size: store as a new variant.
                    group.append(name)
                    if not os.path.isdir(path):
                        os.mkdir(path)
                    flag.save(os.path.join(path, name))
                    return f'{i}\\{name}'
    # No similar group found: start a new one.
    path = os.path.join(FLAG_DIR, str(len(flag_list)))
    flag_list.append([name])
    if not os.path.isdir(path):
        os.mkdir(path)
    flag.save(os.path.join(path, name))
    return f'{str(len(flag_list))}\\{name}'


###########################################################
########################## Image ##########################
###########################################################


#@time_this
def is_flag_screen():
    """True when the identifying screen region matches template.jpg."""
    screen_crop = ImageGrab.grab(COORDS['lobby-flag-screen-id'])
    img_template = Image.open(os.path.join(HOME_DIR, 'template.jpg'))
    if image_similarity(screen_crop, img_template):
        return True
    else:
        return False


#@time_this
def convert_to_bw(pil_img, threshold=127):
    """Binarize a PIL image; return (PIL 1-bit image, numpy array).

    Uses inverted thresholding, then flood-fills from (1, 1) with black to
    remove the connected background region.
    """
    cv_img = np.array(pil_img)
    img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
    thresh, array_bw = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY_INV)
    pil_bw = Image.fromarray(array_bw)
    ImageDraw.floodfill(pil_bw, xy=(1, 1), value=0)
    return pil_bw, array_bw


#@time_this
def count_markers():
    """Count player markers in the one-pixel marker strip.

    Each black run starts a marker; the next 100 pixels are skipped so one
    marker is not counted twice.
    """
    img = ImageGrab.grab(COORDS['lobby-flag-screen-player-markers'])
    bw_img, bw_arr = convert_to_bw(img)
    skip = 0
    markers = 0
    for i, pixel in enumerate(bw_arr[0]):
        if skip:
            skip -= 1
            continue
        if pixel == 0:
            markers += 1
            skip = 100
    return markers


#@time_this
def read_flag(i, area):
    """Capture flag slot `i` from screen region `area` and return the image.

    Also makes sure the flags/ directory exists for callers that save into
    it. (An unused timestamped filename computation was removed here.)
    """
    img = ImageGrab.grab(area)
    flag_dir = os.path.join(HOME_DIR, 'flags')
    if not os.path.isdir(flag_dir):
        os.mkdir(flag_dir)
    return img


def image_similarity(img1, img2, min_sim=90):
    """Return True when the two images are more than min_sim percent alike.

    Both images are shrunk to 64x64, binarized, and compared pixel-wise;
    similarity is the share of pixels whose binarized difference is not 255.
    """
    thumb_img1 = img1.resize((64, 64))
    thumb_img2 = img2.resize((64, 64))
    bw1, arr1 = convert_to_bw(thumb_img1)
    bw2, arr2 = convert_to_bw(thumb_img2)
    # FIX: removed bw1.show()/bw2.show() debug calls that popped an external
    # image viewer on every single comparison.
    diff = ImageChops.difference(bw1, bw2)
    arr = np.asarray(diff)
    # Vectorized equivalent of the original per-pixel counting loop.
    total = arr.size
    different = int((arr == 255).sum())
    sim = ((1 - (different/total)) * 100)
    return sim > min_sim


###########################################################
######################### Launch ##########################
###########################################################


if __name__ == '__main__':
    main()
/smash_reader/logger.py
from datetime import datetime import os from sys import __excepthook__ from time import time from traceback import format_exception BASE_DIR = os.path.realpath(os.path.dirname(__file__)) def log_exception(type, value, tb): error = format_exception(type, value, tb) filepath = os.path.join(BASE_DIR, 'error.log') old_text = '\n' if os.path.isfile(filepath): with open(filepath, 'r') as logfile: old_text += logfile.read() timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S') line = f'[{timestamp}]\n{("".join(error))}' new_text = line + old_text with open(filepath, 'w+') as logfile: logfile.write(new_text) __excepthook__(type, value, tb)
/smash_reader/smash.py
"""Tkinter front-end for the SmashBet screen watcher.

Owns a background smash_watcher.Watcher thread and renders the game state it
reports (teams, players, GSP) into nested frames.
"""
from datetime import datetime
import json
from logger import log_exception
import numpy as np
import os
from PIL import Image, ImageTk
import platform
from queue import Queue, Empty
import requests
import smash_game
import smash_utility as ut
import smash_watcher
from sys import argv, excepthook
import time
import tkinter as tk

# Route uncaught exceptions through the project logger.
excepthook = log_exception

TITLE = 'SmashBet Screen Watcher'

# Module-level verbosity flag, toggled from the Debug menu.
output = False
def _print(*args, **kwargs):
    # Prefixed print that honors the module's output flag.
    if output:
        args = list(args)
        args.insert(0, '<GUI>')
        print(*args, **kwargs)

BASE_DIR = os.path.realpath(os.path.dirname(__file__))

# Color palette (One-Dark-ish): BG = backgrounds, FG = accent/foreground.
BG = ['#282C34', '#383D48']
FG = ['#9098A6', '#9DA5B4', '#ABB3BF', '#E06C75', '#61AFEF', '#56B6C2', '#98C379']


def config_grids(widget, rows=[], columns=[]):
    """Assign grid row/column weights on *widget* from the given weight lists."""
    [widget.rowconfigure(i, weight=weight) for i, weight in enumerate(rows)]
    [widget.columnconfigure(i, weight=weight) for i, weight in enumerate(columns)]


class Menubar(tk.Menu):
    """Main-window menu bar: File (restart/quit) and Debug helpers."""

    def __init__(self, master):
        super().__init__(master)
        self.master = master
        self.file_menu = tk.Menu(self, tearoff=0)
        # self.file_menu.add_command(label='Load State', command=self.load_state)
        # self.file_menu.add_command(label='Save State', command=self.save_state)
        # self.file_menu.add_separator()
        self.file_menu.add_command(label='Restart', command=self.master.restart)
        self.file_menu.add_command(label='Quit', command=self.master.quit)
        self.debug_menu = tk.Menu(self, tearoff=0)
        self.debug_menu.add_command(label='Clear console', command=ut.clear_console)
        # Per-module verbosity toggles; the index argument is the entry's
        # position inside output_menu (used to relabel it).
        self.output_menu = tk.Menu(self, tearoff=0)
        self.output_menu.add_command(
            label='Silence watcher',
            command=lambda: self.toggle_output(smash_watcher, 'watcher', 0)
        )
        self.output_menu.add_command(
            label='Silence game',
            command=lambda: self.toggle_output(smash_game, 'game', 1)
        )
        self.output_menu.add_command(
            label='Silence utility',
            command=lambda: self.toggle_output(ut, 'utility', 2)
        )
        self.debug_menu.add_cascade(label='Outputs', menu=self.output_menu)
        self.debug_menu.add_separator()
        self.debug_menu.add_command(label='Print game data',
            command=lambda: print(self.master.watcher.game.serialize(images_bool=False)))
        self.debug_menu.add_separator()
        self.debug_menu.add_command(label='Capture cards_id template', command=ut.capture_cards_id)
        self.debug_menu.add_command(label='Character name debugging', command=self.master.character_name_debugging)
        self.debug_menu.add_command(label='Click spectate', command=self.master.click_spectate)
        self.add_cascade(label='File', menu=self.file_menu)
        self.add_cascade(label='Debug', menu=self.debug_menu)

    def toggle_output(self, module, name, index):
        """Flip *module*.output and relabel the menu entry accordingly."""
        if module.output:
            self.output_menu.entryconfig(index, label=f'Unsilence {name}')
        else:
            self.output_menu.entryconfig(index, label=f'Silence {name}')
        module.output = not module.output

    def load_state(self):
        """Load a previously saved game dict from game_state.json, or None."""
        path = os.path.join(BASE_DIR, 'game_state.json')
        if os.path.isfile(path):
            with open(path, 'r') as infile:
                return json.load(infile)
        else:
            return None

    def save_state(self):
        """Dump the current (serialized) game dict to game_state.json."""
        game = self.master.game
        if game:
            path = os.path.join(BASE_DIR, 'game_state.json')
            with open(path, 'w+') as outfile:
                json.dump(game, outfile)


class PlayerFrame(tk.Frame):
    """One player's row: number, character, GSP, and the captured name bitmap."""

    def __init__(self, master, player_info, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        self.info = player_info  # serialized player dict from smash_game
        config_grids(self, rows=[1, 1], columns=[1, 1])
        self.player_number_label = tk.Label(self, text=f'Player {self.info["number"]}', bg=self['background'])
        self.player_number_label.grid(row=0, column=0, sticky='nsw', padx=10)
        self.character_name_label = tk.Label(
            self,
            text=f'Character: {self.info["character_name"].title()}',
            bg=self['background']
        )
        self.character_name_label.grid(row=0, column=1, sticky='nsw', padx=10)
        self.gsp_label = tk.Label(self, text=f'GSP: {self.info["gsp"]}', bg=self['background'])
        self.gsp_label.grid(row=1, column=0, sticky='nsw', padx=10)
        # The player's name is shown as a bitmap rebuilt from the captured
        # (binarized) screen pixels rather than OCR'd text.
        arr = np.array(self.info['player_name_image'])
        try:
            img = Image.fromarray(arr.astype('uint8'))
            img = img.resize((200, 30), Image.NEAREST)
            img = img.convert('1').tobitmap()
            bitmap = ImageTk.BitmapImage(data=img)
            self.player_name_label = tk.Label(self, image=bitmap, bg=self.master['background'])
            # Keep a reference so Tk doesn't garbage-collect the image.
            self.player_name_label.image = bitmap
            self.player_name_label.grid(row=1, column=1, sticky='nw', padx=10)
        except TypeError:
            _print(arr)
            _print('Image data corrupted')
            try:
                ut.dump_image_data(arr)
                _print('Image data successfully dumped')
            except:
                _print('Failed to dump image data')


class TeamFrame(tk.Frame):
    """A team's block: placement, team GSP, and one PlayerFrame per member."""

    def __init__(self, master, team_info, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        self.info = team_info  # serialized team dict from smash_game
        self.build_player_frames()

    def build_player_frames(self):
        # Pastel variants of the card colors used as player-row backgrounds.
        COLORS = {
            'RED': (252, 208, 197),
            'BLUE': (163, 220, 248),
            'YELLOW': (246, 237, 166),
            'GREEN': (160, 235, 186)
        }
        if self.info['placement']:
            self.placement_label = tk.Label(
                self,
                bg=self['background'],
                fg=BG[0],
                text=f'{self.info["placement"]} place'
            )
        self.info['players'].sort(key=lambda player: player['number'])
        player_frames = []
        player_len = len(self.info['players'])
        self.gsp_label = tk.Label(self, bg=self['background'], fg=BG[0], text=f'Team GSP: {self.info["gsp_total"]}')
        self.gsp_label.grid(row=0, column=1, columnspan=player_len, sticky='nsw')
        config_grids(self, rows=[1]*(player_len+1), columns=[1, 1])
        # Re-weight row 0 (the GSP header) to 0 so it doesn't stretch.
        config_grids(self, rows=[0])
        for i, player in enumerate(self.info['players']):
            hex_color = ut.rgb_to_hex(COLORS[self.info['color']])
            player_frames.append(PlayerFrame(self, player, bg=hex_color))
            player_frames[i].grid(row=i+1, column=0, columnspan=2, sticky='nsew', padx=10, pady=(0, 10))


class GameFrame(tk.Frame):
    """Top-level game panel: mode/map/number/duration header plus team frames."""

    def __init__(self, master, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        # StringVars backing the header labels; set in display_info().
        self.game_number = tk.StringVar()
        self.game_mode = tk.StringVar()
        self.game_map = tk.StringVar()
        self.game_duration = tk.StringVar()
        config_grids(self, rows=[0, 1], columns=[1])
        self.info_frame = tk.Frame(self, bg=BG[0])
        config_grids(self.info_frame, rows=[1, 1], columns=[1, 1])
        self.info_frame.grid(row=0, column=0, sticky='nsew')
        self.game_mode_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_mode)
        self.game_mode_label.grid(row=0, column=0, sticky='nsew')
        self.game_map_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_map)
        self.game_map_label.grid(row=0, column=1, sticky='nsew')
        self.game_number_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_number)
        self.game_number_label.grid(row=1, column=0, sticky='nsew')
        self.game_duration_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_duration)
        self.game_duration_label.grid(row=1, column=1, sticky='nsew')

    def display_info(self):
        """Pull a fresh serialized game from the watcher and refresh the view."""
        self.master.game = self.master.watcher.game.serialize()
        game = self.master.game
        self.game_number.set(f'Game #{game["number"]}')
        self.game_map.set(f'Map: {game["map"]}')
        self.game_mode.set(f'Mode: {game["mode"]}')
        if game['start_time']:
            self.game_duration.set(
                f'Game began {time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(game["start_time"]))}'
            )
        elif game['duration']:
            self.game_duration.set(f'Game completed in {game["duration"]} seconds')
        self.build_team_frames(game)

    def build_team_frames(self, game):
        """Rebuild the teams area from scratch, ordered RED/BLUE/YELLOW/GREEN."""
        color_order = ['RED', 'BLUE', 'YELLOW', 'GREEN']
        if hasattr(self, 'teams_frame'):
            self.teams_frame.destroy()
        self.teams_frame = tk.Frame(self, bg=BG[1])
        self.teams_frame.grid(row=1, column=0, sticky='nsew')
        team_len = len(game['teams'])
        config_grids(self.teams_frame, rows=[1]*team_len, columns=[1])
        game['teams'].sort(key=lambda team: color_order.index(team['color']))
        team_frames = []
        for team_index, team in enumerate(game['teams']):
            hex_color = ut.rgb_to_hex(ut.COLORS['CARDS'][team['color']])
            team_frames.append(TeamFrame(self.teams_frame, team, bg=hex_color))
            team_frames[team_index].grid(row=team_index, column=0, sticky='nsew', pady=(0, 10))


class WatcherFrame(tk.Frame):
    """Start/stop button plus status line for the watcher thread."""

    def __init__(self, master, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        config_grids(self, rows=[0, 0], columns=[1])
        self.toggle_watcher_button = tk.Button(
            self,
            bg=FG[1], fg=BG[1], bd=0,
            text='Start watcher',
            command=self.toggle_watcher
        )
        self.toggle_watcher_button.grid(row=0, column=0, sticky='ew', pady=(0, 5))
        self.watcher_status = tk.Label(self, text='Watcher stopped', bg=BG[0], fg=FG[3])
        self.watcher_status.grid(row=1, column=0, sticky='ew')

    def toggle_watcher(self):
        """Stop the watcher thread if alive, otherwise start a new one.

        NOTE(review): Thread.isAlive() was removed in Python 3.9 — this code
        targets an older runtime, or needs is_alive().  TODO confirm.
        """
        if self.master.watcher.isAlive():
            # STOP
            self.master.watcher_queue.put('quit')
            self.master.watcher.join()
            self.toggle_watcher_button.config(text='Start watcher')
            self.watcher_status.config(text='Watcher stopped', fg=FG[3])
        else:
            # START: threads can't be restarted, so build a fresh Watcher.
            self.master.watcher = smash_watcher.Watcher(self.master.watcher_queue, self.master.queue)
            self.master.watcher.start()
            self.toggle_watcher_button.config(text='Stop watcher')
            self.watcher_status.config(fg=FG[6])
            # Reset the game panel for the new session.
            self.master.game_frame.destroy()
            self.master.game_frame = GameFrame(self.master, bg=BG[1])
            self.master.game_frame.grid(row=1, column=0, sticky='nsew', padx=10, pady=10)


class Window(tk.Frame):
    """Root application frame: owns the queues, the watcher, and the UI loop."""

    def __init__(self, master, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.master = master
        self.watcher = None
        self.cont = True              # set False by quit() to stop the loop
        self.queue = Queue()          # watcher -> GUI messages
        self.watcher_queue = Queue()  # GUI -> watcher commands
        self.character_name_debugging_enabled = False
        self.watcher = smash_watcher.Watcher(self.watcher_queue, self.queue)
        self.watcher.daemon = True
        self.game = None              # last serialized game dict
        self.restart_flag = False
        self.pack(fill=tk.BOTH, expand=True)
        self.master.title(TITLE)
        config_grids(self, rows=[0, 1], columns=[1])
        self.game_frame = GameFrame(self, bg=BG[1])
        self.game_frame.grid(row=1, column=0, sticky='nsew', padx=10, pady=10)
        self.watcher_frame = WatcherFrame(self, bg=BG[0])
        self.watcher_frame.grid(row=0, column=0, sticky='nsew', padx=10, pady=10)
        self.menubar = Menubar(self)
        self.master.config(menu=self.menubar)
        self.loop()

    def loop(self):
        """Poll the watcher queue every 100 ms via Tk's after() scheduler."""
        if self.cont:
            self.check_queue()
            self.master.after(100, self.loop)

    def check_queue(self):
        # Messages are either the string 'update' or a dict carrying 'status'.
        # NOTE(review): `'status' in item` on the string 'update' is a
        # substring test that happens to be False — works, but fragile.
        try:
            item = self.queue.get(block=False)
            if item == 'update':
                self.game_frame.display_info()
            if 'status' in item:
                self.watcher_frame.watcher_status.config(text=item['status'])
        except Empty:
            pass

    def quit(self):
        self.cont = False
        self.master.destroy()

    def restart(self):
        """Quit, but signal run_gui() to spawn a fresh process afterwards."""
        self.quit()
        self.restart_flag = True

    def character_name_debugging(self):
        """Toggle the watcher lock + smash_game's template-dump debug mode."""
        if not self.character_name_debugging_enabled:
            self.watcher.lock(1)
            smash_game.character_name_debugging_enabled = True
        else:
            self.watcher.unlock()
            smash_game.character_name_debugging_enabled = False
        self.character_name_debugging_enabled = not self.character_name_debugging_enabled

    def click_spectate(self):
        # Debug hook: mark the current game cancelled so the watcher moves on.
        self.watcher.game.cancelled = 'DEBUG'


def run_gui():
    """Build the Tk window, optionally auto-start the watcher, run mainloop,
    then shut the watcher down and (optionally) restart the process."""
    root = tk.Tk()
    root.geometry('540x550')
    window = Window(root, bg=BG[0])
    if ut.SETTINGS['AUTO_START_WATCHER'].lower() == 'true':
        window.watcher_frame.toggle_watcher()
    root.mainloop()
    if window.watcher.isAlive():
        window.watcher_queue.put('quit')
        window.watcher.join()
    if window.restart_flag:
        system = platform.system()
        if system == 'Windows':
            os.system(__file__)
        if system == 'Linux':
            os.system('python3 ' + __file__)


def headless():
    """Run the watcher without a GUI until stop/exit/quit is typed.

    NOTE(review): `key_capture` is not defined anywhere in this file — these
    two lines raise NameError on shutdown; likely leftovers from a removed
    keyboard-capture thread.  TODO confirm and remove.
    """
    queue = Queue()
    watcher_queue = Queue()
    watcher = smash_watcher.Watcher(watcher_queue, queue)
    watcher.start()
    _input = ''
    while _input not in ['stop', 'exit', 'quit']:
        _input = input('>: ')
    key_capture.put('quit')
    key_capture.join()
    watcher_queue.put('quit')
    watcher.join()


if __name__ == '__main__':
    print(f'\n\n{"*" * 40} {TITLE} {"*" * 40}')
    print(f'<<<{datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")}>>>')
    # NOTE(review): len(argv) is always >= 1 (argv[0] is the script name),
    # so this guard is effectively always true.
    if len(argv):
        if '-nogui' in argv:
            headless()
        else:
            run_gui()
/smash_reader/smash_game.py
"""Game-state model: reads players, teams, and results off screen captures."""
import copy
import difflib
import json
from logger import log_exception
import numpy as np
import os
from PIL import Image
import re
import smash_utility as ut
import sys
import threading
import time

sys.excepthook = log_exception

# Toggled from the GUI debug menu; dumps unmatched name crops as templates.
character_name_debugging_enabled = False

# Module verbosity flag, toggled from the GUI debug menu.
output = True
def _print(*args, **kwargs):
    # Prefixed print that honors the module's output flag.
    if output:
        args = list(args)
        args.insert(0, '<Game>')
        print(*args, **kwargs)


CARD_WIDTH = 398
STOCK_SPACING = 26  # horizontal pixel distance between stock icons

with open('fighter_list.json', 'r') as infile:
    CHARACTER_NAMES = json.load(infile)
CHARACTER_NAMES = [name.lower() for name in CHARACTER_NAMES]

BASE_DIR = os.path.realpath(os.path.dirname(__file__))

# Common OCR misreads -> canonical character names.
CHARACTER_NAME_FIXES = {
    'lemmy': 'lenny',
    'lemmv': 'lenny'
}

# Common OCR misreads in map names.
MAP_NAME_FIXES = {
    'Figure-S': 'Figure-8',
    'HiII': 'Hill'
}


class ImageProcessor(threading.Thread):
    """Stub thread subclass (not used yet)."""

    def __init__(self):
        # BUG FIX: Thread subclasses must initialize the base class,
        # otherwise start() fails on an uninitialized thread object.
        super().__init__()


class Player:
    """One player's card data read from the lobby card screen."""

    def __init__(self):
        self.player_name_image = []     # binarized name crop (numpy array)
        self.character_name = ''
        self.number = 0                 # player number (P1..P4)
        self.gsp = 0
        self.stock_template_image = []
        self.stock_count = 0

    def serialize(self, images_bool=True):
        """Return this player as a plain dict; images become nested lists,
        or None when *images_bool* is False."""
        _copy = copy.copy(self)
        # BUG FIX: removed a dead loop that converted player_name_image to a
        # binarized list ('img') and never used it — it also crashed with
        # AttributeError when the image was still the [] default (no .tolist).
        if not images_bool:
            _copy.player_name_image = None
            _copy.stock_template_image = None
        else:
            if len(_copy.player_name_image):
                _copy.player_name_image = _copy.player_name_image.tolist()
            if len(_copy.stock_template_image):
                _copy.stock_template_image = _copy.stock_template_image.tolist()
        return _copy.__dict__

    def read_card(self, card):
        """Populate all fields from one cropped player card image."""
        self.get_character_name(card)
        self.crop_player_name(card)
        self.read_number(card)
        self.read_gsp(card)

    # @ut.time_this
    def get_character_name(self, card):
        """Identify the character: template match first, then OCR + fuzzy
        match, with fallbacks that collect new templates for later runs."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['CHARACTER_NAME'])
        pils = ut.stencil(crop)
        pil = pils[-1]
        template_name, sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES'])
        if sim > 95:
            # Template names look like '<character>-<n>'.
            self.character_name = re.match(r'(.+)(-\d*)', template_name).group(1)
        else:
            name_as_read = ut.read_image(pil).lower()
            if name_as_read in CHARACTER_NAME_FIXES:
                name_as_read = CHARACTER_NAME_FIXES[name_as_read]
            name = difflib.get_close_matches(name_as_read, CHARACTER_NAMES, n=1)
            if len(name):
                name = name[0]
                if character_name_debugging_enabled:
                    # Dump crops that don't closely match an existing dump
                    # template, numbered per character.
                    _template_name, _sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES_DUMP'])
                    if _sim < 99:
                        num = 1
                        for _name in ut.TEMPLATES['CHARACTER_NAMES_DUMP']:
                            _print(name, _name)
                            if name in _name:
                                num += 1
                        filename = f'{name}-{num}.png'
                        path = os.path.join(BASE_DIR, 'templates', 'character_names_dump', filename)
                        pil.save(path)
                self.character_name = name
            else:
                # OCR failed: '...' marks the card unreadable unless a
                # template rescues it below.
                self.character_name = '...'
                template, sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES'], thresh=95)
                if sim >= 95:
                    self.character_name = template.split('-')[0]
                else:
                    # Remember genuinely unreadable crops so they are only
                    # reported once across runs.
                    template, sim = ut.find_most_similar(pil, ut.TEMPLATES['UNREADABLE'], thresh=95)
                    if sim < 95:
                        nums = list(ut.TEMPLATES['UNREADABLE'].keys())
                        if len(nums):
                            nums.sort(key=lambda num: int(num), reverse=True)
                            num = int(nums[0]) + 1
                        else:
                            num = 1
                        filename = f'{num}.png'
                        ut.TEMPLATES['UNREADABLE'][num] = pil
                        pil.save(os.path.join(ut.TEMPLATES_DIR, 'unreadable', filename))
            _print(f'{name_as_read.rjust(30)} --> {self.character_name}')

    # @ut.time_this
    def crop_player_name(self, card):
        """Store the binarized player-name crop (rendered later by the GUI)."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['NAME'])
        img, self.player_name_image = ut.convert_to_bw(crop, 120, False)

    # @ut.time_this
    def read_number(self, card):
        """Read the player number by matching against the P<n> templates."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['NUMBER'])
        # crop.save(f'{time.time()}.png')
        templates = {t: ut.TEMPLATES['LOBBY'][t] for t in ut.TEMPLATES['LOBBY'] if re.match(r'P\d+', t)}
        template_name, sim = ut.find_most_similar(crop, templates)
        num = int(os.path.splitext(template_name)[0].split('P')[1])
        self.number = num

    # @ut.time_this
    def read_gsp(self, card):
        """OCR the GSP value (digits + thousands separators) from the card."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['GSP'])
        text = ut.read_image(crop, 'gsp')
        self.gsp = int(text.replace(',', ''))


class Team:
    """A colored team: its players, combined GSP, and final placement."""

    def __init__(self, color):
        self.color = color
        self.players = []
        self.gsp_total = 0
        self.placement = ''

    def serialize(self, images_bool=True):
        """Return this team as a plain dict with serialized players."""
        players = [player.serialize(images_bool) for player in self.players]
        _copy = copy.copy(self)
        _copy.players = players
        return _copy.__dict__

    def add_player(self, player):
        self.players.append(player)
        self.gsp_total += player.gsp


class Game:
    """Full state of one spectated game, built up screen by screen."""

    def __init__(self, num=1):
        self.number = num
        self.mode = ''           # 'Stock' / 'Time' / 'Stamina'
        self.map = ''
        self.team_mode = False   # True for 2 teams with >2 total players
        self.teams = []
        self.player_count = 0
        self.winning_color = ''
        self.start_time = 0
        self.duration = 0
        self.cancelled = ''      # reason string when the game was abandoned
        self.colors_changed = False

    def serialize(self, images_bool=True):
        """Return this game as a plain dict with serialized teams."""
        teams = [team.serialize(images_bool) for team in self.teams]
        _copy = copy.copy(self)
        _copy.teams = teams
        return _copy.__dict__

    def load(self, data):
        """Restore state from a previously serialized dict."""
        self.__dict__.update(data)

    def read_card_screen(self, card_screen):
        """Entry point for the lobby card screen: header info then cards."""
        self.read_basic_info(card_screen)
        self.read_cards(card_screen)

    @ut.time_this
    def read_basic_info(self, screen):
        """OCR the 'mode / map' header line and apply map-name fixes."""
        crop = screen.crop(ut.COORDS['LOBBY']['GAME_INFO'])
        text = ut.read_image(crop)
        splits = text.split(' / ')
        self.mode = splits[0]
        self.map = splits[1]
        for map_str in MAP_NAME_FIXES:
            if map_str in self.map:
                # BUG FIX: str.replace returns a new string; the original
                # discarded the result, so the fixes were never applied.
                self.map = self.map.replace(map_str, MAP_NAME_FIXES[map_str])

    @ut.time_this
    def read_cards(self, screen):
        """Locate each player card via the white ID pixels on the card slice,
        read it, and assign the player to a color team."""
        # screen.save('screen.png')
        id_slice = screen.crop(ut.COORDS['LOBBY']['CARDS_SLICE_IDS'])
        pil, cv = ut.convert_to_bw(id_slice, threshold=220, inv=False)
        # pil.save('slice.png')
        color_slice = screen.crop(ut.COORDS['LOBBY']['CARDS_SLICE_COLORS'])
        id_arr = np.asarray(pil)
        color_arr = np.asarray(color_slice)
        skip = 0
        id_pixels = [p for row in id_arr for p in row]
        color_pixels = [p for row in color_arr for p in row]
        players = []  # character names seen, for FFA duplicate detection
        for i, id_pixel in enumerate(id_pixels):
            if skip:
                skip -= 1
            elif id_pixel == 255:
                # A white ID pixel marks a card edge; crop the full card.
                card_boundary = (i - 62, 375, i + 341, 913)
                crop = screen.crop(card_boundary)
                color = ut.match_color(arr=color_pixels[i - 5], mode='CARDS')[0]
                player = Player()
                player.read_card(crop)
                if player.character_name == '...':
                    _print('GAME CANCELLED DUE TO UNREADABLE CHARACTER NAME')
                    self.cancelled = 'UNREADABLE_CHARACTER_NAME'
                    ut.send_command('b')
                else:
                    players.append(player.character_name)
                    self.player_count += 1
                    team = next((t for t in self.teams if t.color == color), None)
                    if not team:
                        team = Team(color)
                        self.teams.append(team)
                    team.add_player(player)
                # Skip the rest of this card's width before scanning on.
                skip = 340
        if len(self.teams) == 2 and self.player_count > 2:
            self.team_mode = True
        elif len(set(players)) < len(players):
            # Duplicate characters in FFA can't be told apart later.
            _print('GAME CANCELLED DUE TO DUPLICATE CHARACTER IN FFA')
            self.cancelled = 'DUPLICATE_CHARACTER'
            ut.send_command('b')

    def read_start_screen(self, screen):
        """Handle the in-game start screen; re-captures after a short wait."""
        time.sleep(1)
        screen = ut.capture_screen()
        if not self.team_mode and not self.cancelled:
            self.colors_changed = self.fix_colors(screen)
        if self.mode == 'Stock':
            # self.get_stock_templates(screen)
            pass
        elif self.mode == 'Time':
            pass
        elif self.mode == 'Stamina':
            pass
        else:
            _print(f'unknown mode: {self.mode}')

    # @ut.time_this
    def get_stock_templates(self, screen):
        """Capture each player's stock icon and count icons by similarity.
        (Currently unused — see read_start_screen.)"""
        for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
            stock_template_coords = list(ut.COORDS['GAME']['PLAYER']['STOCK_TEMPLATE'])
            stock_template_coords[0] = edge - stock_template_coords[0]
            stock_template_coords[2] = edge - stock_template_coords[2]
            template = screen.crop(stock_template_coords)
            player_stock_count = 1
            while True:
                # Step right one stock-icon width at a time until the crop
                # stops matching the first icon.
                stock_template_coords[0] += STOCK_SPACING
                stock_template_coords[2] += STOCK_SPACING
                crop = screen.crop(stock_template_coords)
                sim = ut.avg_sim(crop, template)
                if sim > 95:
                    player_stock_count += 1
                else:
                    break

    def fix_colors(self, screen):
        """In FFA the in-game colors can differ from the lobby cards; rebuild
        teams from the in-game HUD.  Returns True when anything changed."""
        info = self.get_character_details_game(screen)
        players = [player for team in self.teams for player in team.players]
        _teams = []
        _print('Fixing colors:')
        for name, color in info:
            player = next((p for p in players if p.character_name == name), None)
            team = Team(color)
            team.add_player(player)
            _teams.append(team)
            _print(f'\t{team.color} - {player.character_name}')
        for team in self.teams:
            color = team.color
            character_name = team.players[0].character_name
            _team = next((t for t in _teams if t.color == color), None)
            if not _team or _team.players[0].character_name != character_name:
                self.teams = _teams
                return True
        return False

    def get_character_templates_lobby(self, screen):
        """Debug helper: save each player's character portrait crop to disk."""
        for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
            char_template_coords = list(ut.COORDS['GAME']['PLAYER']['CHARACTER_TEMPLATE'])
            char_template_coords[0] = edge - char_template_coords[0]
            char_template_coords[2] = edge - char_template_coords[2]
            template = screen.crop(char_template_coords)
            template.save(f'{time.time()}.png')

    def get_character_templates_game(self, screen):
        """Debug helper: save each in-game character portrait crop to disk."""
        for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
            # BUG FIX: key was misspelled 'CHARACTER_TEMPLAT', which raised
            # KeyError against the coordinate table in smash_utility.
            char_template_coords = list(ut.COORDS['GAME']['PLAYER']['CHARACTER_TEMPLATE'])
            char_template_coords[0] = edge - char_template_coords[0]
            char_template_coords[2] = edge - char_template_coords[2]
            template = screen.crop(char_template_coords)
            template.save(f'{time.time()}.png')

    def get_character_details_game(self, screen):
        """Read (character name, color) for each player from the in-game HUD.

        Retries the whole pass while nothing at all is readable.
        NOTE(review): if the HUD never becomes readable this loops forever —
        consider a retry cap.  TODO confirm intended behavior.
        """
        info = []
        rerun = True
        while rerun:
            for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
                color_coords = list(ut.COORDS['GAME']['PLAYER']['COLOR'])
                color_coords[0] = edge - color_coords[0]
                color_coords[2] = edge - color_coords[2]
                color_pixel = screen.crop(color_coords)
                color, _ = ut.match_color(pixel=color_pixel, mode='GAME')
                char_template_coords = list(ut.COORDS['GAME']['PLAYER']['NAME'])
                char_template_coords[0] = edge - char_template_coords[0]
                char_template_coords[2] = edge - char_template_coords[2]
                template = screen.crop(char_template_coords)
                bw, _ = ut.convert_to_bw(template)
                name_as_read = ut.read_image(bw).lower()
                if name_as_read:
                    rerun = False
                    if name_as_read in CHARACTER_NAME_FIXES:
                        name_as_read = CHARACTER_NAME_FIXES[name_as_read]
                    name = difflib.get_close_matches(name_as_read, CHARACTER_NAMES, n=1)
                    if len(name):
                        _print(f'{name_as_read.rjust(30)} --> {name}')
                        info.append((name[0], color))
                    else:
                        # Pokémon Trainer shows the active Pokémon's name.
                        trainer_names = ['squirtle', 'charizard', 'ivysaur']
                        name = difflib.get_close_matches(name_as_read, trainer_names, n=1)
                        if len(name):
                            info.append(('pokémon trainer', color))
                        else:
                            _print(f'Can\'t read <{name_as_read}>')
                            # template.show()
                            # template.save(f'{time.time()}.png')
                else:
                    _print(f'Can\'t read <{name_as_read}>')
        return info

    def wait_for_go(self):
        """Block until the fight-start banner disappears; record start_time.

        NOTE(review): ut.COORDS['GAME'][''] looks like a missing key name
        (no '' entry exists in the coordinate table) — TODO confirm the
        intended region before relying on this method.
        """
        coords = ut.COORDS['GAME']['']
        template = ut.TEMPLATES['IDS']['FIGHT_START']
        screen = ut.capture_screen()
        crop = screen.crop(coords)
        while ut.avg_sim(crop, template) > 85:
            screen = ut.capture_screen()
            crop = screen.crop(coords)
            time.sleep(0.1)
        self.start_time = time.time()

    def read_end_screen(self, screen):
        """Placeholder: the GAME!/end screen carries no data we use yet."""
        pass

    def read_results_screen(self, screen):
        """Determine the winner from the results screen and mark the team."""
        if self.team_mode:
            coords = ut.COORDS['FINAL']['VICTORY_TEAM']
            templates = ut.TEMPLATES['FINAL']
            crop = screen.crop(coords)
            sim_template = ut.find_most_similar(crop, templates)
            # Template names look like '<COLOR>_...'.
            color = sim_template[0].split('_')[0]
            self.winning_color = color
            _print(self.winning_color)
        else:
            coords = ut.COORDS['FINAL']
            first_place_pixel = screen.crop(coords['VICTORY_PLAYER'])
            self.winning_color, sim = ut.match_color(pixel=first_place_pixel, mode='RESULTS')
            _print(self.winning_color)
        team = next((t for t in self.teams if t.color == self.winning_color), None)
        team.placement = '1st'
        # print(self.serialize())
/smash_reader/smash_utility.py
"""Shared utilities: screen capture, OCR, template matching, settings, REST."""
import cv2
from datetime import datetime
import json
from logger import log_exception
import matplotlib.pyplot as plt
import mss
import numpy as np
from PIL import Image, ImageChops, ImageDraw
import pytesseract
import random
import requests
from skimage.measure import compare_ssim
import string
import subprocess
import os
import sys
import time

sys.excepthook = log_exception

# Module verbosity flag, toggled from the GUI debug menu.
output = True
def _print(*args, **kwargs):
    # Prefixed print that honors the module's output flag.
    if output:
        args = list(args)
        args.insert(0, '<Utility>')
        print(*args, **kwargs)

BASE_DIR = os.path.realpath(os.path.dirname(__file__))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')

# Optional index.txt overrides which monitor mss captures.
override_path = os.path.join(BASE_DIR, 'index.txt')
if os.path.isfile(override_path):
    with open(override_path, 'r') as infile:
        MONITOR_INDEX = int(infile.read())
else:
    MONITOR_INDEX = 1

# Pixel coordinates (left, top, right, bottom) of screen regions at 1920x1080.
COORDS = {
    'LOBBY': {
        'BASIC_ID': (145, 32, 321, 70),
        'FLAGS_ID': (394, 291, 1525, 433),
        'CARDS_ID': (671, 152, 1247, 188),
        'GAME_INFO': (302, 217, 1443, 253),
        'CHARACTER_TEMPLATE': (144, 126, 206, 218),
        'CARDS_SLICE_IDS': (0, 877, 1920, 878),
        'CARDS_SLICE_COLORS': (0, 813, 1920, 814),
        'PLAYER': {
            'TEAM_COLOR': (17, 458, 18, 459),
            'CHARACTER_NAME': (0, 367, 396, 430),
            'NAME': (129, 436, 389, 475),
            'NUMBER': (37, 441, 82, 471),
            'GSP': (131, 490, 384, 526)
        }
    },
    'GAME': {
        'TIMER_PREGAME': (1722, 61, 1798, 89),
        'TIMER_VISIBLE': (1703, 63, 1715, 95),
        'TIMER_MILLI': (
            (1823, 70, 1831, 92),
            (1850, 70, 1858, 92)
        ),
        'TIMER_MINUTE': (1675, 54, 1686, 91),
        'TIMES_UP': (465, 299, 1451, 409),
        'SUDDEN_DEATH': (340, 172, 1602, 345),
        'END_ID': (411, 462, 1481, 522),
        'PLAYER': {
            # Right-edge x positions of the HUD slots, keyed by player count;
            # the other PLAYER coords are offsets subtracted from these edges.
            'INFO': {
                2: (712, 1451),
                3: (457, 1081, 1705),
                4: (491, 899, 1307, 1715)
            },
            'STOCK_TEMPLATE': (223, 1045, 221, 1059),
            'CHARACTER_TEMPLATE': (272, 950, 242, 1020),
            'NAME': (182, 1007, 0, 1025),
            'COLOR': (5, 1003, 4, 1004)
        }
    },
    'FINAL': {
        'ID': (
            (468, 49, 550, 296),
            (204, 388, 286, 635)
        ),
        'ID2': (1825, 0, 1864, 73),
        'VICTORY_TEAM': (745, 870, 833, 978),
        'VICTORY_PLAYER': (125, 168, 126, 169),
        '2ND_PLACE': (525, 982, 526, 983),
        '2ND_PLACE_2_PLAYER': (690, 984, 691, 985),
        '3RD_PLACE': (1072, 1003, 1073, 1004),
        '4TH_PLACE': (1492, 1013, 1493, 1014)
    },
    'MENU': {
        'FAILED_TO_PLAY_REPLAY': (724, 408, 1185, 485),
        'SPECTATE_SELECTED': (979, 458, 1586, 606)
    }
}

# Reference RGB values for team colors, per screen context.
COLORS = {
    'CARDS': {
        'RED': (250, 52, 52),
        'BLUE': (43, 137, 253),
        'YELLOW': (248, 182, 16),
        'GREEN': (35, 179, 73)
    },
    'GAME': {
        'RED': (255, 42, 40),
        'BLUE': (31, 141 ,255),
        'YELLOW': (255, 203, 0),
        'GREEN': (22, 193, 64)
    },
    'RESULTS': {
        'RED': (240, 159, 163),
        'BLUE': (125, 206, 254),
        'YELLOW': (255, 244, 89),
        'GREEN': (141, 212, 114)
    }
}

# Load every image under templates/ into TEMPLATES[<FOLDER>][<basename>].
folders = [f for f in os.listdir(TEMPLATES_DIR) if os.path.isdir(os.path.join(TEMPLATES_DIR, f))]
TEMPLATES = {f.upper():{} for f in folders}
for root, dirs, files in os.walk(TEMPLATES_DIR, topdown=False):
    for file in files:
        path = os.path.join(root, file)
        name = os.path.splitext(file)[0]
        _type = os.path.split(root)[1].upper()
        if _type in TEMPLATES:
            TEMPLATES[_type][name] = Image.open(path)
        else:
            TEMPLATES[_type] = {name: Image.open(path)}


def save_settings(settings):
    """Write the settings dict to settings.txt as KEY=value lines."""
    lines = [f'{k}={v}' for k, v in settings.items()]
    open('settings.txt', 'w+').write('\n'.join(lines))


def load_settings():
    """Read settings.txt; on first run, build defaults (migrating key.txt)."""
    path = os.path.join(BASE_DIR, 'settings.txt')
    if os.path.isfile(path):
        lines = open(path, 'r').read().splitlines()
        settings = {}
        for line in lines:
            k, v = line.split('=')
            settings[k] = v
    else:
        # First run: migrate the API key from legacy key.txt if present.
        key_path = os.path.join(BASE_DIR, 'key.txt')
        key = ''
        if os.path.isfile(key_path):
            key = open(key_path, 'r').read().splitlines()[0]
            os.remove(key_path)
        settings = {
            'API_KEY': key,
            'POST_URL': 'https://www.smashbet.net/reader_post/',
            'AUTO_START_WATCHER': 'true'
        }
        save_settings(settings)
    return settings

SETTINGS = load_settings()


#####################################################################
############################# DECORATORS ############################
#####################################################################


def time_this(func):
    """Decorator: print the wall-clock duration of each call to *func*."""
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        duration = end_time - start_time
        dur_str = '{:.2f}'.format(duration)
        _print(f'function: {func.__name__}() executed in {dur_str} seconds')
        return result
    return wrapper


# Make sure function runs at least as long as the set interval
def pad_time(interval):
    """Decorator factory: sleep out the remainder of *interval* seconds."""
    def outer(func):
        def inner(*args, **kwargs):
            start_time = time.time()
            result = func(*args, **kwargs)
            end_time = time.time()
            duration = end_time - start_time
            delta = interval - duration
            if delta > 0:
                # print(f'padding {delta} seconds')
                time.sleep(delta)
            else:
                # print(f'detection has fallen behind by [{"{:.2f}".format(delta)}] seconds')
                pass
            return result
        return inner
    return outer


#####################################################################
########################## IMAGE CAPTURING ##########################
#####################################################################


def save_frames(vid_path, framerate=None):
    """Dump every 30th frame of a video file as frame<N>.png (debug helper)."""
    print('saving template in 5 seconds')
    time.sleep(5)
    vid_cap = cv2.VideoCapture(vid_path)
    success = True
    frame_index = 0
    while success:
        vid_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
        success, image = vid_cap.read()
        _print(f'Read frame {frame_index}: ', success)
        cv2.imwrite(f'frame{frame_index}.png', image) # save frame as JPEG file
        frame_index += 30


# @time_this
def capture_screen(monitor_index=MONITOR_INDEX):
    """Grab the given monitor with mss and return it as a PIL RGB image."""
    with mss.mss() as sct:
        monitor_count = len(sct.monitors)
        # Clamp to the last available monitor.
        if monitor_index > monitor_count:
            monitor_index = monitor_count
        monitor = sct.monitors[monitor_index]
        sct_img = sct.grab(monitor)
        pil_img = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
        return pil_img


def capture_cards_id():
    """Recapture the CARDS_ID template from the live screen and save it."""
    coords = COORDS['LOBBY']['CARDS_ID']
    cap = capture_screen()
    crop = cap.crop(coords)
    if 'CARDS_ID' in TEMPLATES['LOBBY']:
        del TEMPLATES['LOBBY']['CARDS_ID']
    crop.save(os.path.join(TEMPLATES_DIR, 'lobby', 'CARDS_ID.png'))
    TEMPLATES['LOBBY']['CARDS_ID'] = crop


#####################################################################
########################## IMAGE PROCESSING #########################
#####################################################################


def read_image(image, config_type='basic'):
    """OCR *image* with tesseract using a named config preset."""
    configs = {
        'basic': '--psm 6 --oem 3',
        'gsp': '--psm 8 --oem 3 -c tessedit_char_whitelist=0123456789,',
        'player_number': '--psm 8 --oem 3 -c tessedit_char_whitelist=p1234'
    }
    text = pytesseract.image_to_string(image, config=configs[config_type])
    return text


def convert_to_bw(pil_img, threshold=127, inv=True):
    """Threshold *pil_img* to black & white; returns (PIL image, array).

    Falls back to the unmodified input when the image can't be converted
    to grayscale (e.g. it is already single-channel).
    """
    cv_img = np.array(pil_img)
    try:
        img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
        if inv:
            method = cv2.THRESH_BINARY_INV
        else:
            method = cv2.THRESH_BINARY
        thresh, array_bw = cv2.threshold(img_gray, threshold, 255, method)
        pil_bw = Image.fromarray(array_bw)
        return pil_bw, array_bw
    except cv2.error:
        return pil_img, cv_img


def find_most_similar(sample, templates, thresh=0):
    """Return [name, similarity] of the best-matching template.

    With *thresh* > 0, short-circuits at the first template above it.
    """
    high_sim = ['', 0]
    for template_name in templates:
        sim = avg_sim(sample, templates[template_name])
        if sim > high_sim[1]:
            high_sim = [template_name, sim]
        if thresh and sim > thresh:
            return high_sim
    return high_sim


def compare_chops(sample, template, true_color=False):
    """Percent pixel agreement via ImageChops.difference on 64x64 thumbnails.

    Returns 0 when the two images have different sizes.
    """
    if sample.size == template.size:
        copy1 = sample.resize((64, 64))
        copy2 = template.resize((64, 64))
        if not true_color:
            copy1, arr1 = convert_to_bw(copy1)
            copy2, arr2 = convert_to_bw(copy2)
        diff = ImageChops.difference(copy1, copy2)
        arr = np.asarray(diff)
        total = 0
        different = 0
        for row in arr:
            for pixel in row:
                total += 1
                # b&w arrays give scalar pixels; color gives per-channel.
                if isinstance(pixel, (int, np.uint8)):
                    if pixel == 255:
                        different += 1
                else:
                    for color in pixel:
                        different += (color / 255)
        sim = ((1 - (different/total)) * 100)
        return sim
    return 0


def compare_skim(sample, template, true_color=False):
    """Percent similarity via scikit-image SSIM; 0 when sizes differ.

    NOTE(review): copy1/copy2 are computed but never used, and copy2
    resizes *sample* where *template* was presumably intended — the SSIM
    below runs on the unresized originals.  TODO confirm and clean up.
    """
    if sample.size == template.size:
        copy1 = sample.resize((64, 64))
        copy2 = sample.resize((64, 64))
        if not true_color:
            try:
                sample = cv2.cvtColor(np.array(sample), cv2.COLOR_BGR2GRAY)
            except cv2.error:
                sample = np.array(sample)
            try:
                template = cv2.cvtColor(np.array(template), cv2.COLOR_BGR2GRAY)
            except cv2.error:
                template = np.array(template) # Image is already b&w
        sim, diff = compare_ssim(sample, template, full=True, multichannel=True)
        return sim * 100
    return 0


def area_sim(cap, screen, area):
    """Best similarity between *cap*'s crop(s) of *area* and its template.

    COORDS entries may hold one box or a tuple of boxes; the best match wins.
    """
    template = TEMPLATES[screen][area]
    coords = COORDS[screen][area]
    if not isinstance(coords[0], (list, tuple)):
        coords = [coords]
    high_sim = 0
    for coord in coords:
        crop = cap.crop(coord)
        sim = avg_sim(crop, template)
        if sim > high_sim:
            high_sim = sim
    return high_sim


def avg_sim(sample, template, true_color=False):
    """Average of the ImageChops- and SSIM-based similarity scores."""
    comp_funcs = (compare_chops, compare_skim)
    sims = [comp_func(sample, template, true_color) for comp_func in comp_funcs]
    avg = sum(sims) / len(sims)
    return avg


def match_color(pixel=None, arr=[], mode=None):
    """Match one RGB sample against COLORS[*mode*]; returns (name, sim%).

    Accepts either a 1x1 PIL *pixel* crop or a raw RGB sequence *arr*.
    NOTE(review): `any(arr)` rejects an all-zero (black) sample — presumably
    intentional for these screens; confirm before reusing elsewhere.
    """
    best_match = ('', 0)
    if not mode:
        _print('mode required for color match')
        return best_match
    if pixel:
        # First pixel of the crop, flattened out of the 2-D array.
        sample = [rgb for row in np.asarray(pixel) for rgb in row][0]
    elif any(arr):
        sample = arr
    else:
        _print('no sample')
        return best_match
    colors = COLORS[mode]
    for color_name in colors:
        # Manhattan distance over RGB, scaled to a 0-100 similarity.
        diff = 0
        for sv, tv in zip(sample, colors[color_name]):
            diff += abs(sv - tv)
        sim = 100 - ((diff / 765) * 100)
        if sim > best_match[1]:
            best_match = (color_name, sim)
    return best_match


def stencil(crop):
    """Isolate near-white text on *crop* into a clean black & white stencil.

    Combines a white-threshold pass with a border-filled black-threshold
    pass; returns the intermediate images with the final stencil last.
    """
    w_pil, w_arr = convert_to_bw(crop, 254, inv=False)
    b_pil, _ = convert_to_bw(crop, 1, inv=False)
    b_fil = b_pil.copy()
    fill_border(b_fil)
    b_arr = np.array(b_fil)
    result = []
    for r1, r2 in zip(w_arr, b_arr):
        r = []
        for p1, p2 in zip(r1, r2):
            # Keep (as black) only pixels white in BOTH passes.
            if int(p1) and int(p2):
                r.append(0)
            else:
                r.append(255)
        result.append(r)
    arr = np.array(result)
    img = Image.fromarray(arr.astype('uint8'))
    imgs = [crop, w_pil, b_pil, b_fil, img]
    return imgs


def fill_border(img):
    """Flood-fill white blobs touching the image border to black, in place.

    NOTE(review): `row_i == row_count` / `p_i == col_count` can never be
    true (indices stop at count-1), so the bottom row and right column are
    never treated as border — looks like an off-by-one.  The while/break
    wrapper also only runs a single pass.  TODO confirm intent.
    """
    while True:
        arr = np.array(img)
        row_count = len(arr)
        for row_i, row in enumerate(arr):
            col_count = len(row)
            for p_i, p in enumerate(row):
                if int(p):
                    if row_i == 0 or row_i == row_count \
                            or p_i == 0 or p_i == col_count:
                        ImageDraw.floodfill(img, (p_i, row_i), 0)
                        continue
        break


def filter_color(image, color):
    """Keep only pixels of *image* within a +/-10 hue band around *color*."""
    color = np.uint8([[color]])
    hsv = cv2.cvtColor(color, cv2.COLOR_RGB2HSV)
    darker = np.array([hsv[0][0][0] - 10, 50, 50])
    lighter = np.array([hsv[0][0][0] + 10, 360, 360])
    image = np.asarray(image)
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    mask = cv2.inRange(hsv, darker, lighter)
    result = cv2.bitwise_and(image, image, mask=mask)
    return result


def filter_color2(img, coords):
    """Binary mask (nested lists, 255/0) of pixels exactly matching the
    pixel found at (x, y) = *coords* in *img*."""
    arr = np.array(img)
    x, y = coords
    pixel = list(arr[y][x])
    result = []
    for row in arr:
        r = []
        for p in row:
            if list(p) == pixel:
                r.append(255)
            else:
                r.append(0)
        result.append(r)
    return result


def rgb_to_hex(rgb):
    """Convert an (r, g, b) tuple to a '#rrggbb' hex string."""
    return '#%02x%02x%02x' % rgb


#####################################################################
################################ MISC ###############################
#####################################################################


def simplify_players(game):
    """Flatten a serialized game's teams into a list of trimmed player dicts
    (empty fields dropped, team color attached, names title-cased)."""
    players = []
    for team in game['teams']:
        color = team['color']
        for player in team['players']:
            keys = list(player.keys())
            for key in keys:
                if not player[key]:
                    del player[key]
            if 'character_name' in player:
                player['character_name'] = player['character_name'].title()
            player['color'] = color
            players.append(player)
    return players


def filter_game_data(game, mode):
    """Shape a serialized game into the REST payload for reader *mode*.

    Modes: 1=lobby cards, 2=color fixups, 3=game start, 4=game end,
    5=final result.
    """
    simple_game = {'reader_mode': mode}
    if mode == 1:
        simple_game['players'] = simplify_players(game)
        simple_game['map'] = game['map']
        simple_game['team_mode'] = game['team_mode']
        simple_game['game_mode'] = game['mode']
        simple_game['cancelled'] = game['cancelled']
    if mode == 2:
        if not game['team_mode']:
            simple_game['colors_changed'] = game['colors_changed']
            if game['colors_changed']:
                for team in game['teams']:
                    simple_game['players'] = simplify_players(game)
    if mode == 3:
        simple_game['start_time'] = -1
    if mode == 4:
        simple_game['end_time'] = -1
    if mode == 5:
        simple_game['winning_team'] = game['winning_color']
    return simple_game


def post_data(data={}):
    """POST *data* to the configured endpoint; returns the response or None
    when the API is unreachable."""
    key = SETTINGS['API_KEY']
    URL = SETTINGS['POST_URL']
    DATA = {
        'API_KEY': key,
        'data': data
    }
    try:
        r = requests.post(url=URL, json=DATA)
        return r
    except requests.exceptions.ConnectionError:
        print('Unable to reach REST API')
        return None


def dump_image_data(arr):
    """Append a timestamped copy of image array *arr* to img_dump.json."""
    filepath = os.path.join(BASE_DIR, 'img_dump.json')
    if os.path.isfile(filepath):
        with open(filepath, 'r') as infile:
            data = json.load(infile)
    else:
        data = []
    data.append({time.time(): arr})
    with open(filepath, 'w+') as outfile:
        json.dump(data, outfile)


def clear_console():
    """Clear the terminal on either Windows (cls) or POSIX (clear)."""
    try:
        none = os.system('cls')
    except:
        pass
    try:
        none = os.system('clear')
    except:
        pass


def save_game_data(game):
    """Append *game* to games.json (compact separators)."""
    data = load_game_data()
    data.append(game)
    with open('games.json', 'w+') as outfile:
        json.dump(data, outfile, separators=(',',':'))


def load_game_data():
    """Load games.json; returns [] when missing or unparseable."""
    path = os.path.join(BASE_DIR, 'games.json')
    if os.path.isfile(path):
        try:
            with open(path, 'r') as infile:
                return json.load(infile)
        except json.decoder.JSONDecodeError:
            pass
    return []


def send_command(btn):
    """Simulate controller button *btn* by running its script on the Pi.

    NOTE(review): hard-coded host and user path — breaks on any other setup.
    """
    _print('PRESS', btn)
    os.system(f'PIGPIO_ADDR=raspberrypi.local python3 /home/badgerlord/Desktop/{btn}.py')


def random_str(l=10):
    """Generate a random string of ASCII letters and digits
    """
    password_characters = string.ascii_letters + string.digits
    return ''.join(random.choice(password_characters) for i in range(l))
/smash_reader/smash_watcher.py
import json from logger import log_exception import os from queue import Empty import re import requests import smash_game import smash_utility as ut import sys import threading import time sys.excepthook = log_exception output = True def _print(*args, **kwargs): if output: args = list(args) args.insert(0, '<Watcher>') print(*args, **kwargs) class Watcher(threading.Thread): def __init__(self, watcher_queue, gui_queue): # print('\n') super().__init__() self.queue = watcher_queue self.gui_queue = gui_queue self.id_coords = [ ('LOBBY', 'FLAGS_ID'), ('LOBBY', 'CARDS_ID'), (), (), ('GAME', 'END_ID'), ('FINAL', 'ID'), ('FINAL', 'ID2') ] self.locked = False self.reset() # Game finished or cancelled def reset(self): if not self.locked: self.current_type_index = 0 self.list_limit = 3 self.sim_lists = [[0] * self.list_limit for _ in range(len(self.id_coords))] self.cont = True self.current_game_num = len(ut.load_game_data()) + 1 self.game = smash_game.Game(self.current_game_num) self.timer_detected = False self.timer_visible = False self.timer_running = False self.timer_running_templates = (None, None) self.timer_sim_hits = 0 # Starts when watcher is created and loops forever def run(self): _print('Watching for flags') self.gui_queue.put({'status': 'Watching for flag screen'}) while self.cont: timer_vis_sim = 0 timer_milli_sim = 0 self.cap = ut.capture_screen() crop = self.cap.crop(ut.COORDS['MENU']['FAILED_TO_PLAY_REPLAY']) if ut.avg_sim(crop, ut.TEMPLATES['MENU']['FAILED_TO_PLAY_REPLAY']) >= 95: self.game.cancelled = 'REPLAY_FAILED' time.sleep(5) ut.send_command('a') if self.game.cancelled: self.reset() if not self.locked: self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for menu screen'}) self.watch_for_menu() if not self.locked: self.gui_queue.put({'status': 'Watching for flag screen'}) # check timer visibility and movement, set class variables if self.current_type_index >= 2: timer_vis_sim = self.check_timer_visibility() timer_milli_sim = 0 if 
self.timer_detected: timer_milli_sim = self.check_timer_movement() # look for the timer at the beginning if self.current_type_index == 2: if self.timer_detected: _print(f'timer detected: {timer_vis_sim}') self.read_screen_data() # wait for the timer to start moving elif self.current_type_index == 3: if self.timer_running: _print(f'timer movemement detected: {timer_milli_sim}') self.read_screen_data() # check to see if the timer is stopped, or the "GAME" text is # detected, or the results screen is detected elif self.current_type_index == 4: if self.check_screen_basic() > 90: # pass because read_screen_data will be called if True # and the rest of the checks will be skipped pass else: # Timer stopped if not self.timer_running: self.read_screen_data() # Results screen detected else: checks = [ self.check_screen_basic(index=5, normal=False), self.check_screen_basic(index=6, normal=False) ] if sum(checks) / 2 > 80: # run twice because the match end screen was missed self.read_screen_data() self.read_screen_data() # check for current basic template (flags, cards, results) else: self.check_screen_basic() self.check_queue() time.sleep(0.1) def check_queue(self): if self.queue: try: item = self.queue.get(block=False) if item == 'quit': self.cont = False except Empty: pass def lock(self, index): self.current_type_index = index - 1 self.read_screen_data() self.locked = True def unlock(self): self.locked = False self.reset() def watch_for_menu(self): templates = [ ut.TEMPLATES['MENU']['SPECTATE_SELECTED'], ut.TEMPLATES['LOBBY']['FLAGS_ID'] ] while self.cont: cap = ut.capture_screen() self.check_queue() crop = cap.crop(ut.COORDS['MENU']['SPECTATE_SELECTED']) if ut.avg_sim(crop, templates[0]) > 95: time.sleep(5) ut.send_command('a') break crop = cap.crop(ut.COORDS['LOBBY']['FLAGS_ID']) if ut.avg_sim(crop, templates[1]) > 95: break ut.send_command('a') time.sleep(2) # @ut.pad_time(0.20) def check_screen_basic(self, index=-1, normal=True, screen=None, area=None): if index == -1: 
index = self.current_type_index if not screen and not area: screen, area = self.id_coords[index] sim = ut.area_sim(self.cap, screen, area) l = self.sim_lists[index] l.insert(0, sim) del l[-1] avg = sum(l) / len(l) if avg > 90: _print(f'Screen type {{{index}}} sim: {avg}') if normal: l = [0] * self.list_limit self.read_screen_data() return avg def check_timer_visibility(self): timer_vis_crop = self.cap.crop(ut.COORDS['GAME']['TIMER_VISIBLE']) template = ut.TEMPLATES['GAME']['TIMER_VISIBLE'] timer_vis_sim = ut.avg_sim(timer_vis_crop, template) if timer_vis_sim > 95: # _print(f'timer vis sim: {timer_vis_sim}') if not self.timer_detected: self.timer_detected = True self.timer_visible = True else: self.timer_visible = False return timer_vis_sim def check_timer_movement(self): timer_sim = 0 if self.timer_visible: coords = ut.COORDS['GAME']['TIMER_MILLI'] crops = [self.cap.crop(coord) for coord in coords] # [crop.show() for crop in crops] if all(self.timer_running_templates): timer_sim = sum([ut.avg_sim(t, c) for t, c in zip(self.timer_running_templates, crops)]) / 2 # for i, crop in enumerate(crops): # timer_sim = ut.avg_sim(crop, self.timer_running_templates[i]) / (i + 1) if timer_sim > 90: _print(f'timer sim: {timer_sim}') self.timer_sim_hits += 1 if self.timer_sim_hits >= 3: if self.timer_running: # self.read_screen_data() self.timer_running = False else: self.timer_running = True self.timer_sim_hits = 0 self.timer_running_templates = crops return timer_sim def battle_watcher(self): pass def filter_and_post(self, game): data = { 'game': ut.filter_game_data( game, self.current_type_index ), 'mode': self.current_type_index } ut.post_data(data) def read_screen_data(self): qp = lambda: self.filter_and_post(self.game.serialize(images_bool=False)) # Flags if self.current_type_index == 0: self.gui_queue.put('update') _print('Flags detected') self.gui_queue.put({'status': 'Watching for card screen'}) # Cards if self.current_type_index == 1: _print('Cards detected') 
self.gui_queue.put({'status': 'Reading cards'}) time.sleep(1) self.cap = ut.capture_screen() self.game.read_card_screen(self.cap) qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle pregame'}) # Pregame if self.current_type_index == 2: _print('Battle pregame detected') self.game.read_start_screen(self.cap) qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle start'}) # Game started if self.current_type_index == 3: _print('Battle start detected') qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle end'}) # Game ended if self.current_type_index == 4: _print('Battle end detected') qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle results'}) # Results if self.current_type_index == 5: _print('Battle results detected') self.game.read_results_screen(self.cap) qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for flag screen'}) # ut.save_game_data(self.game.serialize()) if not self.locked: self.current_type_index += 1 if self.current_type_index >= 6: self.reset() _print(f'Mode changed to {self.current_type_index}') # _print(json.dumps(self.game.serialize(), separators=(',', ': ')))
/smash_reader/tests.py
import argparse import cv2 import difflib import json import matplotlib.pyplot as plt import mss import numpy as np import os import re import requests import select import smash_game import smash_utility as ut import socket import struct import threading from queue import Empty, Queue #from matplotlib import pyplot as plt from PIL import Image, ImageChops, ImageDraw BASE_DIR = os.path.realpath(os.path.dirname(__file__)) CAPTURES_DIR = os.path.join(BASE_DIR, 'captures') if not os.path.isdir(CAPTURES_DIR): os.mkdir(CAPTURES_DIR) def post_fake(data={'mode': 1, 'game': {'players': []}}): ut.post_data(data) def test_pixel(): img = Image.open('1560221662.467294.png') img = ut.filter_color2(img, (0, 10)) p = plt.imshow(img) plt.show() def test_stencil(): img = Image.open('1560219739.917792.png') ut.stencil(img) def test_game_data(): with open('game_state.json', 'r') as infile: game = json.load(infile) ut.filter_game_data(game, 1) def req(message='No message'): URL = 'http://localhost:8000/reader_info/' DATA = { 'secret_code': 'Mj76uiJ*(967%GVr57UNJ*^gBVD#W4gJ)ioM^)', 'data': message } r = requests.post(url = URL, json = DATA) return r class KeyThread(threading.Thread): def __init__(self, *args, **kwargs): super().__init__() self.key = keyboard.KeyCode(char='g') def run(self): with keyboard.Listener(on_press=self.on_press) as listener: listener.join() def on_press(self, key): if key == self.key: print('test') def start_key_thread(): thread = KeyThread() thread.daemon = True thread.start() def fight_tester(): captures = os.listdir(CAPTURES_DIR) get_fight_num = lambda f: re.match('\d+', f).group() fight_nums = list({get_fight_num(f) for f in captures}) fight_nums.sort(key=lambda n: int(n)) # n = fight_nums[int(random.random() * len(fight_nums))] # n = '0001' modes = {} for i, n in enumerate(fight_nums[16:]): print(f'{"*" * 80}\n{n}') card_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.2.LOBBY_CARDS.png')) fight_start_screen = Image.open(os.path.join(CAPTURES_DIR, n + 
'.3.FIGHT_START.png')) # fight_end_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.4.FIGHT_END.png')) # try: # fight_results_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.5.FIGHT_RESULTS_SOLO.png')) # except FileNotFoundError: # fight_results_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.5.FIGHT_RESULTS_TEAM.png')) game = smash_game.Game(1) game.read_card_screen(card_screen) if game.mode in modes: modes[game.mode].append(i) else: modes[game.mode] = [i] break for mode in modes: print(f'{mode}: {modes[mode]}') game.read_start_screen(fight_start_screen) print(game.serialize(images_bool=False)) # game.fix_colors(fight_start_screen) # game.read_end_screen(fight_end_screen) # game.read_results_screen(fight_results_screen) # print(str(game)) # with open('game.json', 'w+') as outfile: # json.dump(game.serialize(), outfile, separators=(',',':')) def crop_char_lobby(): cap = ut.capture_screen() game = smash_game.Game(1) game.player_count = 4 game.read_cards(cap) def crop_char_game(): cap = ut.capture_screen() game = smash_game.Game(1) game.player_count = 3 name_images = game.get_character_name_game(cap) for img in name_images: bw, _ = ut.convert_to_bw(img) name_as_read = ut.read_image(bw).lower() name = difflib.get_close_matches(name_as_read, smash_game.CHARACTER_NAMES, n=1) print(name) def filter(): plt.ion() while True: cap = ut.capture_screen() img = ut.filter_color(cap, [236, 236, 236]) plt.imshow(img) plt.pause(0.001) plt.show() def cropper(coord_name, name=None): coords = ut.COORDS['FINAL'][coord_name] capture = ut.capture_screen() crop = capture.crop(coords) if name: crop.save(f'{name}.png') else: return np.asarray(crop) # crop.show() def capture_screen(): with mss.mss() as sct: # Get rid of the first, as it represents the "All in One" monitor: #for num, monitor in enumerate(sct.monitors[1:], 1): monitor = sct.monitors[1] # Get raw pixels from the screen sct_img = sct.grab(monitor) # Create the Image img = Image.frombytes('RGB', sct_img.size, 
sct_img.bgra, 'raw', 'BGRX') # The same, but less efficient: # img = Image.frombytes('RGB', sct_img.size, sct_img.rgb) num = 0 name = os.path.join(home, 'screens', f'{num}.png') while os.path.isfile(name): num += 1 name = os.path.join(home, 'screens', f'{num}.png') return img def get_stream(): port = 9999 # where do you expect to get a msg? bufferSize = 2048 # whatever you need s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(('', port)) s.setblocking(0) if True: result = select.select([s],[],[]) msg = result[0][0].recv(bufferSize) print(msg) cap = ImageGrab.grab() cv2.imdecode(cap, flags=1) def get_stream2(): HOST = '' PORT = 9999 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print('Socket created') s.bind((HOST, PORT)) print('Socket bind complete') s.listen(10) print('Socket now listening') conn, addr = s.accept() while True: data = conn.recv(8192) nparr = np.fromstring(data, np.uint8) frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR) cv2.imshow('frame', frame) time.sleep(2) def get_stream3(): MCAST_GRP = '224.1.1.1' MCAST_PORT = 9999 IS_ALL_GROUPS = True sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if IS_ALL_GROUPS: # on this port, receives ALL multicast groups sock.bind(('', MCAST_PORT)) else: # on this port, listen ONLY to MCAST_GRP sock.bind((MCAST_GRP, MCAST_PORT)) mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY) sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) while True: print(sock.recv(10240)) def get_stream_data(main_queue, image_queue): print('Getting stream data') cap = cv2.VideoCapture('udp://224.0.0.1:2424', cv2.CAP_FFMPEG) print(cap) if not cap.isOpened(): print('VideoCapture not opened') exit(-1) x = 0 while True: print('cap') image_queue.put(cap) print('put') item = get_queue(main_queue) if item == 'end': break cap.release() cv2.destroyAllWindows() def convert_to_bw(pil_img, threshold=127): cv_img = 
np.array(pil_img) img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY) thresh, array_bw = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY_INV) pil_bw = Image.fromarray(array_bw) return pil_bw, array_bw def compare(): imgs = os.listdir(os.path.join(home, 'flags')) [print(f'{str(i+1).rjust(2)}. {img}') for i, img in enumerate(imgs)] #x = 0 while True: first = int(input('one>: ')) img1 = Image.open(os.path.join(home, 'flags', imgs[first-1])) print(img1) second = int(input('two>: ')) img2 = Image.open(os.path.join(home, 'flags', imgs[second-1])) print(img2) #small, large = sorted([img1, img2], key=lambda img: img.size[0]) copy1 = img1.resize((64, 64)) copy2 = img2.resize((64, 64)) bw1, arr1 = convert_to_bw(copy1) bw2, arr2 = convert_to_bw(copy2) diff = ImageChops.difference(bw1, bw2) diff.show() arr = np.asarray(diff) total = 0 different = 0 for row in arr: for pixel in row: total += 1 if pixel == 255: different += 1 sim = ((1 - (different/total)) * 100) print(sim) if sim < 98: print('different flag') else: print('same flag') #diff.save(f'diff-{x}.jpg') #x += 1 def get_queue(queue): try: item = queue.get(block=False) return item except Empty: return None class ImageProcessingThread(threading.Thread): def __init__(self, main_queue, queue): super().__init__() self.queue = queue self.main_queue = main_queue self.x = 0 print('Image processing thread started') def run(self): while True: cap = get_queue(self.queue) if cap: self.process_frame(cap) def process_frame(self, cap): ret, frame = cap.read() if not ret: print('frame empty') main_queue.put('end') flipped = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) img = Image.fromarray(flipped) img.save(os.path.join('test', f'{self.x}.jpg')) self.x += 1 #cv2.imshow('image', frame) if cv2.waitKey(1)&0XFF == ord('q'): main_queue.put('end') pass def thread_test(): main_queue = Queue() processing_queue = Queue() processing_thread = ImageProcessingThread(main_queue, processing_queue) processing_thread.daemon = True 
processing_thread.start() print('test') get_stream_data(main_queue, processing_queue) def ocr_test(): # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required=True, help="path to input image to be OCR'd") ap.add_argument("-p", "--preprocess", type=str, default="thresh", help="type of preprocessing to be done") args = vars(ap.parse_args()) # load the example image and convert it to grayscale image = cv2.imread(args["image"]) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # check to see if we should apply thresholding to preprocess the # image if args["preprocess"] == "thresh": gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] # make a check to see if median blurring should be done to remove # noise elif args["preprocess"] == "blur": gray = cv2.medianBlur(gray, 3) # write the grayscale image to disk as a temporary file so we can # apply OCR to it #filename = "{}.png".format(os.getpid()) #cv2.imwrite(filename, gray) pil_gray = Image.fromarray(gray) # load the image as a PIL/Pillow image, apply OCR, and then delete # the temporary file text = pytesseract.image_to_string(pil_gray) #os.remove(filename) print(text) # show the output images cv2.imshow("Image", image) cv2.imshow("Output", gray) cv2.waitKey(0) def game_color(): game = smash_game.Game() game.player_count = 4 img = Image.open(os.path.join('captures', '0001.3.FIGHT_START.png')) for edge in ut.COORDS['GAME']['PLAYER']['INFO'][game.player_count]: color_coords = list(ut.COORDS['GAME']['PLAYER']['COLOR']) color_coords[0] = edge - color_coords[0] color_coords[2] = edge - color_coords[2] crop = img.crop(color_coords) print(ut.match_color(pixel=crop, mode='GAME')) if __name__ == '__main__': #ocr_test() #fight_tester() #test_game_data() #test_stencil() #fight_tester() game_color() pass
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
radrumond/hidra
refs/heads/master
{"/main.py": ["/args.py", "/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py"], "/test.py": ["/data_gen/omni_gen.py"], "/train.py": ["/data_gen/omni_gen.py"], "/archs/fcn.py": ["/archs/maml.py"]}
└── ├── archs │ ├── fcn.py │ ├── hydra.py │ └── maml.py ├── args.py ├── data_gen │ └── omni_gen.py ├── main.py ├── test.py └── train.py
/archs/fcn.py
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow import os import numpy as np import tensorflow as tf from archs.maml import MAML class Model(MAML): def __init__(self,train_lr,meta_lr,image_shape,isMIN, label_size=2): super().__init__(train_lr,meta_lr,image_shape,isMIN,label_size) def dense_weights(self): weights = {} cells = {} initializer = tf.contrib.layers.xavier_initializer() print("Creating/loading Weights") divider = 1 inic = 1 filters = 64 finals = 64 if self.isMIN: divider = 2 inic = 3 finals = 800 filters = 32 with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE): weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer) weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer) weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer) weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer) weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant) weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant) weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant) weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant) weights['d_1'] = tf.get_variable('d_1w', [finals,self.label_size], initializer = initializer) weights['b_1'] = tf.get_variable('d_1b', [self.label_size], initializer=tf.initializers.constant) """weights['mean'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() ) weights['mean1'] = 
tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance1'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset1'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale1'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() ) weights['mean2'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance2'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset2'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale2'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() ) weights['mean3'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance3'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset3'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale3'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )""" print("Done Creating/loading Weights") return weights, cells def forward(self,x,weights, training): conv1 = self.conv_layer(x, weights["c_1"],weights["cb_1"],"conv1") conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE) conv1 = tf.nn.relu(conv1) conv1 = tf.layers.MaxPooling2D(2,2)(conv1) conv2 = self.conv_layer(conv1,weights["c_2"],weights["cb_2"],"conv2") conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE) conv2 = tf.nn.relu(conv2) conv2 = tf.layers.MaxPooling2D(2,2)(conv2) conv3 = self.conv_layer(conv2,weights["c_3"],weights["cb_3"],"conv3") conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE) conv3 = tf.nn.relu(conv3) conv3 = tf.layers.MaxPooling2D(2,2)(conv3) conv4 = self.conv_layer(conv3,weights["c_4"],weights["cb_4"],"conv4") conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE) conv4 = tf.nn.relu(conv4) conv4 = 
tf.layers.MaxPooling2D(2,2)(conv4) # print(conv4) # bn = tf.squeeze(conv4,axis=(1,2)) bn = tf.layers.Flatten()(conv4) # tf.reshape(bn, [3244,234]) fc1 = self.fc_layer(bn,"dense1",weights["d_1"],weights["b_1"]) # bn = tf.reshape(bn,[-1,]) return fc1
/archs/hydra.py
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow import numpy as np import tensorflow as tf from archs.maml2 import MAML def getBin(l=10): x_ = 2 n = 1 while x_ < l: x_ = x_* 2 n += 1 numbers = [] for i in range(l): num = [] for j in list('{0:0b}'.format(i+1).zfill(n)): num.append(int(j)) numbers.append(num) return numbers class Model(MAML): def __init__(self,train_lr,meta_lr,image_shape,isMIN, label_size=2): super().__init__(train_lr,meta_lr,image_shape,isMIN, label_size) self.finals = 64 if isMIN: self.finals = 800 def getBin(self, l=10): x_ = 2 n = 1 while x_ < l: x_ = x_* 2 n += 1 numbers = [] for i in range(l): num = [] for j in list('{0:0b}'.format(i+1).zfill(n)): num.append(int(j)) numbers.append(num) return numbers def dense_weights(self): weights = {} cells = {} initializer = tf.contrib.layers.xavier_initializer() divider = 1 inic = 1 filters = 64 self.finals = 64 if self.isMIN: print("\n\n\n\n\n\n\n\n\nIS MIN\n\n\n\n\n\n\n\n\n\n\n") divider = 2 inic = 3 self.finals = 800 filters = 32 with tf.variable_scope('MASTER', reuse= tf.AUTO_REUSE): cells['d_1'] = tf.get_variable('MASTER_d_1w', [self.finals,1], initializer = initializer) cells['b_1'] = tf.get_variable('MASTER_d_1b', [1], initializer=tf.initializers.constant) with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE): weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer) weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer) weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer) weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer) weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant) weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant) weights['cb_3'] = 
tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant) weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant) for i in range (self.max_labels): weights['d_1w'+str(i)] = tf.get_variable('d_1w'+str(i), [self.finals,1], initializer = initializer) weights['b_1w'+str(i)] = tf.get_variable('d_1b'+str(i), [1], initializer=tf.initializers.constant) return weights, cells def forward(self,x,weights, training): # with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE): conv1 = self.conv_layer(x, weights["c_1"],weights["cb_1"],"conv1") conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE) conv1 = tf.nn.relu(conv1) conv1 = tf.layers.MaxPooling2D(2,2)(conv1) conv2 = self.conv_layer(conv1,weights["c_2"],weights["cb_2"],"conv2") conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE) conv2 = tf.nn.relu(conv2) conv2 = tf.layers.MaxPooling2D(2,2)(conv2) conv3 = self.conv_layer(conv2,weights["c_3"],weights["cb_3"],"conv3") conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE) conv3 = tf.nn.relu(conv3) conv3 = tf.layers.MaxPooling2D(2,2)(conv3) conv4 = self.conv_layer(conv3,weights["c_4"],weights["cb_4"],"conv4") conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE) conv4 = tf.nn.relu(conv4) conv4 = tf.layers.MaxPooling2D(2,2)(conv4) bn = tf.layers.Flatten()(conv4) agg = [self.fc_layer(bn,"dense"+str(i),weights["d_1w"+str(i)],weights["b_1w"+str(i)]) for i in range(self.max_labels)] fc1 = tf.concat(agg, axis=-1)[:,:self.label_n[0]] return fc1
/archs/maml.py
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import os
import numpy as np
import tensorflow as tf


class MAML:
    """Base class for Model-Agnostic Meta-Learning (TF1 graph mode).

    Subclasses are expected to implement ``dense_weights`` (returning the
    parameter dict and cell variables) and ``forward`` (the network's forward
    pass); here both are stubs returning ``None``.

    Fixes vs. the original:
      * ``mode is 'train'`` replaced by ``mode == 'train'`` — identity
        comparison of string literals is implementation-dependent and emits a
        SyntaxWarning on CPython >= 3.8.
      * ``self.saver == None`` replaced by ``is None``.
      * ``loadWeights`` crashed with the default ``step=0`` (int) because it
        concatenated ``step`` directly into the path; it now uses ``str(step)``.
      * ``saveWeights`` now creates the actual ``modeldir/name/`` directory the
        Saver writes into, not just ``modeldir``.
    """

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        """
        :param train_lr: inner-loop (task adaptation) learning rate
        :param meta_lr: outer-loop (meta) learning rate for Adam
        :param image_shape: input image shape, last entry is the channel count
        :param isMIN: True when training on MiniImagenet (larger flattened
                      feature size), False for Omniglot
        :param label_size: number of output classes (n-way)
        """
        self.train_lr = train_lr
        self.meta_lr = meta_lr
        self.image_shape = image_shape
        self.isMIN = isMIN
        self.saver = None
        self.label_size = label_size
        self.finals = 64       # flattened conv-feature size fed to the head
        self.maml_n = 1        # 1 = plain MAML; 2 = hydra variant (see train.py)
        if isMIN:
            self.finals = 800  # MiniImagenet (84x84x3) yields a larger feature map

    def build(self, K, meta_batchsz, mode='train'):
        """Build the meta-train or meta-test graph.

        :param K: number of inner gradient steps per task
        :param meta_batchsz: number of tasks per meta batch
        :param mode: 'train' builds the meta-optimizer; anything else builds
                     only the evaluation losses
        """
        # Meta batch of tasks: [meta_batch, n_samples, H, W, C] / [meta_batch, n_samples, n_classes]
        self.train_xb = tf.placeholder(tf.float32, [None, None, None, None, self.image_shape[-1]])
        self.train_yb = tf.placeholder(tf.float32, [None, None, None])
        self.val_xb = tf.placeholder(tf.float32, [None, None, None, None, self.image_shape[-1]])
        self.val_yb = tf.placeholder(tf.float32, [None, None, None])
        self.label_n = tf.placeholder(tf.int32, 1, name="num_labs")

        # Initialize weights (provided by the subclass)
        self.weights, self.cells = self.dense_weights()
        training = True if mode == 'train' else False

        # Handle one task update
        def meta_task(inputs):
            train_x, train_y, val_x, val_y = inputs
            val_preds, val_losses = [], []

            # First inner step: gradient of the task-train loss w.r.t. the
            # meta parameters produces the "fast" (task-adapted) weights.
            train_pred = self.forward(train_x, self.weights, training)
            train_loss = tf.losses.softmax_cross_entropy(train_y, train_pred)
            grads = tf.gradients(train_loss, list(self.weights.values()))
            gvs = dict(zip(self.weights.keys(), grads))
            a = [self.weights[key] - self.train_lr * gvs[key] for key in self.weights.keys()]
            fast_weights = dict(zip(self.weights.keys(), a))

            # Validation after each update
            val_pred = self.forward(val_x, fast_weights, training)
            val_loss = tf.losses.softmax_cross_entropy(val_y, val_pred)
            # record T0 pred and loss for meta-test
            val_preds.append(val_pred)
            val_losses.append(val_loss)

            # continue to build T1-TK steps graph
            for _ in range(1, K):
                # Update weights on train data of task t
                loss = tf.losses.softmax_cross_entropy(
                    train_y, self.forward(train_x, fast_weights, training))
                grads = tf.gradients(loss, list(fast_weights.values()))
                gvs = dict(zip(fast_weights.keys(), grads))
                fast_weights = dict(zip(
                    fast_weights.keys(),
                    [fast_weights[key] - self.train_lr * gvs[key] for key in fast_weights.keys()]))
                # Evaluate validation data of task t
                val_pred = self.forward(val_x, fast_weights, training)
                val_loss = tf.losses.softmax_cross_entropy(val_y, val_pred)
                val_preds.append(val_pred)
                val_losses.append(val_loss)

            result = [train_pred, train_loss, val_preds, val_losses]
            return result

        out_dtype = [tf.float32, tf.float32, [tf.float32] * K, [tf.float32] * K]
        result = tf.map_fn(meta_task,
                           elems=(self.train_xb, self.train_yb, self.val_xb, self.val_yb),
                           dtype=out_dtype, parallel_iterations=meta_batchsz, name='map_fn')
        train_pred_tasks, train_loss_tasks, val_preds_tasks, val_losses_tasks = result

        if mode == 'train':
            self.train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
            self.val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz
                                            for j in range(K)]
            self.val_predictions = val_preds_tasks
            # Meta update: optimize the K-th step validation loss with
            # gradient clipping (norm 10) for stability.
            optimizer = tf.train.AdamOptimizer(self.meta_lr, name='meta_optim')
            gvs = optimizer.compute_gradients(self.val_losses[-1])
            gvs = [(tf.clip_by_norm(grad, 10), var) for grad, var in gvs]
            self.meta_op = optimizer.apply_gradients(gvs)
        else:
            self.test_train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
            self.test_val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz
                                                 for j in range(K)]
            self.val_predictions = val_preds_tasks

        self.saving_weights = tf.trainable_variables()

    def conv_layer(self, x, W, b, name, strides=1):
        """2-D convolution (stride 1, SAME padding) followed by a bias add."""
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            x = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
            x = tf.nn.bias_add(x, b)
            return x

    def fc_layer(self, x, name, weights=None, biases=None):
        """Fully connected layer: x @ weights + biases."""
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            fc = tf.matmul(x, weights)
            fc = tf.nn.bias_add(fc, biases)
            return fc

    def loadWeights(self, sess, name, step=0, modeldir='./model_checkpoint/',
                    model_name='model.ckpt'):
        """Restore weights from ``modeldir/name/model_name-step`` if the
        matching ``.marker`` file exists; otherwise start from scratch."""
        if self.saver is None:
            z = self.saving_weights
            self.saver = tf.train.Saver(var_list=z, max_to_keep=12)
        saver = self.saver
        # str(step): callers pass a string, but the default step=0 is an int
        # and used to raise a TypeError on concatenation.
        checkpoint_path = modeldir + f"{name}/" + model_name + "-" + str(step)
        if os.path.isfile(checkpoint_path + ".marker"):
            saver.restore(sess, checkpoint_path)
            print('The checkpoint has been loaded.')
        else:
            print(checkpoint_path + ".marker not found. Starting from scratch.")

    def saveWeights(self, sess, name, step=0, modeldir='./model_checkpoint/',
                    model_name='model.ckpt'):
        """Save weights to ``modeldir/name/model_name-step`` and drop a
        ``.marker`` file so loadWeights can detect the checkpoint."""
        if self.saver is None:
            z = self.saving_weights
            self.saver = tf.train.Saver(var_list=z, max_to_keep=12)
        saver = self.saver
        checkpoint_path = modeldir + f"{name}/" + model_name
        # Create the actual target directory (the Saver does not create it);
        # the original only ensured `modeldir`, not the per-name subfolder.
        if not os.path.exists(modeldir + f"{name}/"):
            os.makedirs(modeldir + f"{name}/")
        saver.save(sess, checkpoint_path, global_step=step)
        print('The checkpoint has been created.')
        open(checkpoint_path + "-" + str(int(step)) + ".marker", 'a').close()

    def dense_weights(self):
        # To be provided by the architecture subclass (fcn/hydra).
        return

    def forward(self, x, weights, training):
        # To be provided by the architecture subclass (fcn/hydra).
        return
/args.py
""" Command-line argument parsing. """ import argparse #from functools import partial import time import tensorflow as tf import json import os def boolean_string(s): if s not in {'False', 'True'}: raise ValueError('Not a valid boolean string') return s == 'True' def argument_parser(): """ Get an argument parser for a training script. """ file_time = int(time.time()) parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--arch', help='name architecture', default="fcn", type=str) parser.add_argument('--seed', help='random seed', default=0, type=int) parser.add_argument('--name', help='name add-on', type=str, default='Model_config-'+str(file_time)) parser.add_argument('--dataset', help='data set to evaluate on', type=str, default='Omniglot') parser.add_argument('--data_path', help='path to data folder', type=str, default='/home/') parser.add_argument('--config', help='json config file', type=str, default=None) parser.add_argument('--checkpoint', help='checkpoint directory', default='model_checkpoint') parser.add_argument('--test', help='Testing or Not', action='store_true') parser.add_argument('--testintrain', help='Testing during train or Not', action='store_true') parser.add_argument('--min_classes', help='minimum number of classes for n-way', default=2, type=int) parser.add_argument('--max_classes', help='maximum (excluded) number of classes for n-way', default=2, type=int) parser.add_argument('--ttrain_shots', help='number of examples per class in meta train', default=5, type=int) parser.add_argument('--ttest_shots', help='number of examples per class in meta test', default=15, type=int) parser.add_argument('--etrain_shots', help='number of examples per class in meta train', default=5, type=int) parser.add_argument('--etest_shots', help='number of examples per class in meta test', default=15, type=int) parser.add_argument('--train_inner_K', help='number of inner gradient steps during meta training', 
default=5, type=int) parser.add_argument('--test_inner_K', help='number of inner gradient steps during meta testing', default=5, type=int) parser.add_argument('--learning_rate', help='Adam step size for inner training', default=0.4, type=float) parser.add_argument('--meta_step', help='meta-training step size', default=0.01, type=float) parser.add_argument('--meta_batch', help='meta-training batch size', default=1, type=int) parser.add_argument('--meta_iters', help='meta-training iterations', default=70001, type=int) parser.add_argument('--eval_iters', help='meta-training iterations', default=2000, type=int) parser.add_argument('--step', help='Checkpoint step to load', default=59999, type=float) # python main_emb.py --meta_step 0.005 --meta_batch 8 --learning_rate 0.3 --test --checkpoint Model_config-1568818723 args = vars(parser.parse_args()) #os.system("mkdir -p " + args['checkpoint']) if args['config'] is None: args['config'] = f"{args['checkpoint']}/{args['name']}/{args['name']}.json" print(args['config']) # os.system("mkdir -p " + f"{args['checkpoint']}") os.system("mkdir -p " + f"{args['checkpoint']}/{args['name']}") with open(args['config'], 'w') as write_file: print("Json Dumping...") json.dump(args, write_file) else: with open(args['config'], 'r') as open_file: args = json.load(open_file) return parser def train_kwargs(parsed_args): """ Build kwargs for the train() function from the parsed command-line arguments. """ return { 'min_classes': parsed_args.min_classes, 'max_classes': parsed_args.max_classes, 'train_shots': parsed_args.ttrain_shots, 'test_shots': parsed_args.ttest_shots, 'meta_batch': parsed_args.meta_batch, 'meta_iters': parsed_args.meta_iters, 'test_iters': parsed_args.eval_iters, 'train_step' : parsed_args.step, 'name': parsed_args.name, } def test_kwargs(parsed_args): """ Build kwargs for the train() function from the parsed command-line arguments. 
""" return { 'eval_step' : parsed_args.step, 'min_classes': parsed_args.min_classes, 'max_classes': parsed_args.max_classes, 'train_shots': parsed_args.etrain_shots, 'test_shots': parsed_args.etest_shots, 'meta_batch': parsed_args.meta_batch, 'meta_iters': parsed_args.eval_iters, 'name': parsed_args.name, }
/data_gen/omni_gen.py
import numpy as np
import os
import cv2
import pickle


class MiniImgNet_Gen:
    """Episode (task) generator for the MiniImagenet dataset.

    Images are loaded lazily per split on the first call to sample_Task and
    cached in memory (meta_train / meta_val / meta_test).
    """

    def __init__(self, path="/tmp/data/miniimagenet", data_path=None):
        # NOTE(review): when data_path is not None, self.path and the
        # *_paths lists are never set, so sample_Task would fail — the
        # data_path branch looks unfinished; confirm intended use.
        if data_path is None:
            self.path = path
            self.train_paths = ["train/"+x for x in os.listdir(path+"/train")]
            self.test_paths = ["test/"+x for x in os.listdir(path+"/test")]
            self.val_paths = ["val/"+x for x in os.listdir(path+"/val")]
        self.data_path = data_path
        self.meta_train = None
        self.meta_test = None
        self.meta_val = None

    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Infinite generator of meta batches.

        Yields (meta_train_x, meta_train_y, meta_test_x, meta_test_y), each a
        list of mb_size arrays; labels are one-hot. The number of classes per
        meta batch is sampled uniformly from [min_class, max_class).
        """
        print('Loading MiniImagenet data...')
        if training == "train":
            if self.meta_train is None:
                meta_data = []
                for idx, im_class in enumerate(self.train_paths):
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84, 84], rgb=True)))
                self.meta_train = meta_data
            else:
                meta_data = self.meta_train
        elif training == "val":
            if self.meta_val is None:
                meta_data = []
                for idx, im_class in enumerate(self.val_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84, 84], rgb=True)))
                self.meta_val = meta_data
            else:
                meta_data = self.meta_val
        elif training == "test":
            if self.meta_test is None:
                meta_data = []
                for idx, im_class in enumerate(self.test_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84, 84], rgb=True)))
                self.meta_test = meta_data
            else:
                meta_data = self.meta_test
        else:
            raise ValueError("Training needs to be train, val or test")
        print(f'Finished loading MiniImagenet data: {np.array(meta_data).shape}')
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        while True:
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # sample fixed number classes for a meta batch
            nr_classes = np.random.randint(min_class, max_class)
            for mb in range(mb_size):
                # select which classes in the meta batch
                classes = np.random.choice(range(len(meta_data)), nr_classes, replace=False)
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                for label_nr, cl in enumerate(classes):
                    # Draw train+test shots without replacement, then split.
                    images = np.random.choice(len(meta_data[cl]), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(meta_data[cl][train_imgs])
                    test_x.append(meta_data[cl][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                # One-hot encode the integer labels.
                train_y = np.eye(len(classes))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(classes))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 84, 84, 3])
                test_x = np.reshape(test_x, [-1, 84, 84, 3])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            # print('YIEEEEEEELDING')
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y


# Initiates the Omniglot dataset and splits into meta train and meta task
class OmniChar_Gen:
    """Episode generator for Omniglot; eagerly loads all characters (both
    background and evaluation sets) and splits them into 1200 meta-train
    characters and the remainder for meta-test."""

    def __init__(self, path="/tmp/data/omniglot", data_path=None, test_idx=None):
        self.path = path
        self.tasks = ["/images_background/"+x for x in os.listdir(path+"/images_background")] \
                   + ["/images_evaluation/"+x for x in os.listdir(path+"/images_evaluation")]
        self.lens = {}
        for task in self.tasks:
            self.lens[task] = len(os.listdir(self.path+task))
        self.meta_data = []
        print("Loading Omniglot data")
        for idx, task in enumerate(range(len(self.tasks))):
            if idx % 10 == 0:
                print(f"Loading tasks {idx}/{len(self.tasks)}")
            data = []
            for char in os.listdir(self.path+self.tasks[task]):
                c = []
                for img in os.listdir(self.path+self.tasks[task]+"/"+char):
                    c.append(readImg(self.path+self.tasks[task]+"/"+char+"/"+img))
                data.append(c)
            self.meta_data.append(data)
        # Flatten alphabets into a single list of characters.
        self.meta_data = np.concatenate(self.meta_data)
        print("Finished loading data")
        if test_idx == None:
            # Random 1200/rest split of character indices.
            self.train_idx = list(range(len(self.meta_data)))
            np.random.shuffle(self.train_idx)
            self.test_idx = self.train_idx[1200:]
            self.train_idx = self.train_idx[:1200]
            print("Test_idx:", self.test_idx)
        else:
            # Reproduce a previous split from the given test indices.
            self.test_idx = test_idx
            self.train_idx = list(set(list(range(len(self.meta_data)))) - set(self.test_idx))

    # Builds a generator that samples meta batches from meta training/test data
    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Infinite generator of Omniglot meta batches; same output structure
        as MiniImgNet_Gen.sample_Task. Each character has exactly 20 drawings,
        hence the fixed range(20) when sampling shots."""
        if training == "train":
            idx = self.train_idx
        elif training == "test":
            idx = self.test_idx
        else:
            raise ValueError("Omniglot only supports train and test for training param")
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")  ## We can remove this later and make it dynamic
        while True:
            image_idx = idx.copy()
            np.random.shuffle(image_idx)
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # Roll number of classes in the mb
            nr_classes = np.random.randint(min_class, max_class)
            for task in range(mb_size):
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                # Sample the characters for the task
                chars = np.random.choice(image_idx, nr_classes, False)
                # Sample the shots for each character
                for label_nr, char in enumerate(chars):
                    images = np.random.choice(range(20), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(self.meta_data[char][train_imgs])
                    test_x.append(self.meta_data[char][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                train_y = np.eye(len(chars))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(chars))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 28, 28, 1])
                test_x = np.reshape(test_x, [-1, 28, 28, 1])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y


def getOrder(minClass, maxClass, mb_size, number_chars=1200):
    # gives a list integers between minClass and maxClass that sum up to 1200,
    # repeated mb_size times per drawn class count.
    lens = []
    sums = 0
    while sums <= number_chars-minClass*mb_size:
        maxV = int((number_chars-sums)/mb_size)+1
        n = np.random.randint(minClass, min(maxV, maxClass))
        lens += [n]*mb_size
        sums = sums+(n*mb_size)
    return lens


def readImg(path, size=[28, 28], rgb=False):
    """Read an image, resize it to `size`, scale pixel values to [0, 1], and
    return either a single-channel or 3-channel array depending on `rgb`."""
    img = cv2.imread(path)
    img = cv2.resize(img, (size[0], size[1])).astype(float)
    if np.max(img) > 1.0:
        img /= 255.
    if not rgb:
        # cv2.imread returns 3 identical channels for grayscale sources;
        # keep only the first.
        return img[:, :, :1]
    else:
        if len(img.shape) == 3:
            if img.shape[-1] != 3:
                print('ASFASFASFAS')
                print(img.shape)
                print(path)
            return img
        else:
            # Grayscale fallback: replicate the plane into 3 channels.
            return np.reshape([img, img, img], [size[0], size[1], 3])


def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with the same random permutation."""
    assert len(a) == len(b)
    p = np.random.permutation(len(a))
    return a[p], b[p]


def loadImgDir(path, size, rgb):
    """Read every image in a directory via readImg; returns a list of arrays."""
    imgs = []
    for img in os.listdir(path):
        imgs.append(readImg(path+"/"+img, size, rgb))
    return imgs
/main.py
## Created by Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
# Entry-point script: parse CLI arguments, select dataset loader and
# architecture, build the graph(s), then dispatch to train() or test().
from data_gen.omni_gen import unison_shuffled_copies, OmniChar_Gen, MiniImgNet_Gen
from archs.fcn import Model as mfcn
from archs.hydra import Model as mhyd
from train import *
from test import *
from args import argument_parser, train_kwargs, test_kwargs
import random

args = argument_parser().parse_args()
random.seed(args.seed)
t_args = train_kwargs(args)
e_args = test_kwargs(args)

print("########## argument sheet ########################################")
for arg in vars(args):
    print(f"#{arg:>15} : {str(getattr(args, arg))} ")
print("##################################################################")

print("Loading Data...")
# Dataset selection decides loader, image shape, and the isMIN flag that the
# models use to size their final feature layer.
if args.dataset in ["Omniglot", "omniglot", "Omni", "omni"]:
    loader = OmniChar_Gen(args.data_path)
    isMIN = False
    shaper = [28, 28, 1]
elif args.dataset in ["miniimagenet", "MiniImageNet", "mini"]:
    loader = MiniImgNet_Gen(args.data_path)
    isMIN = True
    shaper = [84, 84, 3]
else:
    raise ValueError("INVALID DATA-SET NAME!")

print("Building Model...")
# Two model objects are built: `m` for training, `mt` for evaluation — the
# meta-test graph differs (no meta-optimizer), so it needs a separate build.
if args.arch == "fcn" or args.arch == "maml":
    print("SELECTED: MAML")
    m = mfcn(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
    mt = mfcn(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
#elif args.arch == "rnn":
#    m = mrnn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.min_classes)
elif args.arch == "hydra" or args.arch == "hidra":
    print("SELECTED: HIDRA")
    m = mhyd(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
    mt = mhyd(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
else:
    raise ValueError("INVALID Architecture NAME!")

mode = "train"
if args.test:
    # Evaluation only: build the test graph and run test().
    mode = "test"
    print("Starting Test Step...")
    mt.build(K=args.test_inner_K, meta_batchsz=args.meta_batch, mode=mode)
    test(mt, loader, **e_args)
else:
    # Training; optionally also build a test graph for periodic evaluation.
    modeltest = None
    if args.testintrain:
        mt.build(K=args.test_inner_K, meta_batchsz=args.meta_batch, mode="test")
        modeltest = mt
    print("Starting Train Step...")
    m.build(K=args.train_inner_K, meta_batchsz=args.meta_batch, mode=mode)
    train(m, modeltest, loader, **t_args)
/test.py
import numpy as np import tensorflow as tf from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen def test(m, data_sampler, eval_step, min_classes, max_classes, train_shots, test_shots, meta_batch, meta_iters, name): sess = tf.Session() sess.run(tf.global_variables_initializer()) losses=[] temp_yp = [] aps = [] buffer = [] lossesB=[] train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test") print("TEST MODE") m.loadWeights(sess, name, step = str(int(eval_step)), model_name=name+".ckpt") for i in range(meta_iters): xb1,yb1,xb2,yb2 = next(train_gen) num_l = [len(np.unique(np.argmax(yb1,axis=-1)))] if m.maml_n == 2: sess.run(m.init_assign, feed_dict={m.label_n:[5]}) l,vals,ps=sess.run([m.test_train_loss,m.test_val_losses,m.val_predictions],feed_dict={m.train_xb: xb1, m.train_yb: yb1, m.val_xb:xb2, m.val_yb:yb2, m.label_n:num_l}) losses.append(vals) lossesB.append(vals) buffer.append(l) true_vals = np.argmax(yb2,axis=-1) all_accs = [] for pred_epoch in range(len(ps)): all_accs.append(np.mean(np.argmax(ps[pred_epoch],axis=-1)==true_vals)) temp_yp.append(all_accs) # if i%1==0: if i%50==0: print(f"({i}/{meta_iters})") print(f"Final: TLoss {np.mean(buffer)}, VLoss {np.mean(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}" ) print(f"Final: TLoss {np.mean(buffer)}-{np.std(buffer)}, VLoss {np.mean(lossesB,axis=0)}-{np.std(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}-{np.std(temp_yp,axis=0)}" )
/train.py
import numpy as np
import tensorflow as tf
from data_gen.omni_gen import unison_shuffled_copies, OmniChar_Gen, MiniImgNet_Gen
import time


def train(m, mt,          # m is the model for training, mt is the model for testing
          data_sampler,   # Creates the data generator for training and testing
          min_classes,    # minimum amount of classes
          max_classes,    # maximum amount of classes
          train_shots,    # number of samples per class (train)
          test_shots,     # number of samples per class (test)
          meta_batch,     # Number of tasks
          meta_iters,     # Number of iterations
          test_iters,     # Iterations in Test
          train_step,     # checkpoint step to resume from
          name):          # Experiment name for experiments
    """Meta-training loop: runs `meta_iters` meta updates, logs losses and
    accuracies every 100 iterations, optionally evaluates with `mt` every
    1000 iterations, and checkpoints every 5000 iterations."""
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # bnorms = [v for v in tf.global_variables() if "bn" in v.name]
    #---------Performance Tracking lists---------------------------------------
    losses = []
    temp_yp = []
    temp_ypn = []
    nls = []
    aps = []
    buffer = []
    lossesB = []
    #--------------------------------------------------------------------------
    #---------Load train and test data-sets------------------------------------
    train_gen = data_sampler.sample_Task(meta_batch, min_classes, max_classes+1,
                                         train_shots, test_shots, "train")
    if mt is not None:
        test_gen = data_sampler.sample_Task(meta_batch, min_classes, max_classes+1,
                                            train_shots, test_shots, "test")
    # Resume from checkpoint if one exists (no-op otherwise).
    m.loadWeights(sess, name, step=str(int(train_step)), model_name=name+".ckpt")
    #--------------------------------------------------------------------------
    #TRAIN LOOP
    print("Starting meta training:")
    start = time.time()
    for i in range(meta_iters):
        xb1, yb1, xb2, yb2 = next(train_gen)
        # Actual number of distinct labels in this meta batch.
        num_l = [len(np.unique(np.argmax(yb1, axis=-1)))]
        if m.maml_n == 2:
            # in case it uses hydra master node, it should re-assign the output nodes from the master
            sess.run(m.init_assign, feed_dict={m.label_n: [5]})
        l, _, vals, ps = sess.run([m.train_loss, m.meta_op, m.val_losses, m.val_predictions],
                                  feed_dict={m.train_xb: xb1, m.train_yb: yb1,
                                             m.val_xb: xb2, m.val_yb: yb2, m.label_n: num_l})
        if m.maml_n == 2:
            # in case it uses hydra master node, it should update the master
            sess.run(m.final_assign, feed_dict={m.label_n: num_l})
        losses.append(vals)
        lossesB.append(vals)
        buffer.append(l)
        # Calculate accuracies (overall and per label) from the last inner step.
        aux = []
        tmp_pred = np.argmax(np.reshape(ps[-1], [-1, num_l[0]]), axis=-1)
        tmp_true = np.argmax(np.reshape(yb2, [-1, num_l[0]]), axis=-1)
        for ccci in range(num_l[0]):
            tmp_idx = np.where(tmp_true == ccci)[0]
            #print(tmp_idx)
            aux.append(np.mean(tmp_pred[tmp_idx] == tmp_true[tmp_idx]))
        temp_yp.append(np.mean(tmp_pred == tmp_true))
        temp_ypn.append(aux)
        #EVALUATE and PRINT
        if i % 100 == 0:
            testString = ""
            # If we give a test model, it will test using the weights from train
            if mt is not None and i % 1000 == 0:
                lossestest = []
                buffertest = []
                lossesBtest = []
                temp_yptest = []
                for z in range(100):
                    if m.maml_n == 2:
                        sess.run(mt.init_assign, feed_dict={mt.label_n: [5]})
                    xb1, yb1, xb2, yb2 = next(test_gen)
                    num_l = [len(np.unique(np.argmax(yb1, axis=-1)))]
                    l, vals, ps = sess.run([mt.test_train_loss, mt.test_val_losses, mt.val_predictions],
                                           feed_dict={mt.train_xb: xb1, mt.train_yb: yb1,
                                                      mt.val_xb: xb2, mt.val_yb: yb2, mt.label_n: num_l})
                    lossestest.append(vals)
                    lossesBtest.append(vals)
                    buffertest.append(l)
                    temp_yptest.append(np.mean(np.argmax(ps[-1], axis=-1) == np.argmax(yb2, axis=-1)))
                testString = f"\n TEST: TLoss {np.mean(buffertest):.3f} VLoss {np.mean(lossesBtest,axis=0)[-1]:.3f}, ACCURACY {np.mean(temp_yptest):.4f}"
            print(f"Epoch {i}: TLoss {np.mean(buffer):.4f}, VLoss {np.mean(lossesB,axis=0)[-1]:.4f},",
                  f"Accuracy {np.mean(temp_yp):.4}",
                  f", Per label acc: {[float('%.4f' % elem) for elem in aux]}",
                  f"Finished in {time.time()-start}s", testString)
            # Reset the rolling windows used for the periodic report.
            buffer = []
            lossesB = []
            temp_yp = []
            start = time.time()
            # f"\n TRUE: {yb2}\n PRED: {ps}")
        if i % 5000 == 0:
            print("Saving...")
            m.saveWeights(sess, name, i, model_name=name+".ckpt")
    # Final checkpoint after the last iteration.
    m.saveWeights(sess, name, i, model_name=name+".ckpt")
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
sebastianden/alpaca
refs/heads/master
{"/src/cam.py": ["/src/utils.py"], "/src/test_time.py": ["/src/utils.py", "/src/alpaca.py"], "/src/test_use_case.py": ["/src/utils.py", "/src/alpaca.py"], "/src/alpaca.py": ["/src/utils.py"], "/src/main.py": ["/src/utils.py", "/src/alpaca.py"], "/src/test_voting.py": ["/src/utils.py", "/src/alpaca.py"]}
└── └── src ├── alpaca.py ├── cam.py ├── gridsearch_results.py ├── main.py ├── test_time.py ├── test_use_case.py ├── test_voting.py └── utils.py
/src/alpaca.py
import warnings
warnings.simplefilter(action='ignore')
import pickle
import pandas as pd
import numpy as np
from utils import TimeSeriesScalerMeanVariance, Flattener, Featuriser, plot_dtc
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_curve, auc
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from tslearn.clustering import TimeSeriesKMeans
from tslearn.neighbors import KNeighborsTimeSeriesClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot
from tensorflow.keras.utils import plot_model


class Alpaca(ClassifierMixin):
    """
    A learning product classification algorithm.

    Combines an anomaly detector (1-NN over k-means centroids) with an
    ensemble classifier; both are tuned via grid search in fit().
    """
    def __init__(self):
        self.anomaly_detection = AnomalyDetection()
        self.classifier = Classifier()

    def fit(self, X, y, stacked=True):
        """
        Fit the algorithm according to the given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the
            level 1 classifiers

        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection
        # Do GridSearch to get best model
        param_grid = {'n_clusters': [10, 50, 100, 200]}
        grid = GridSearchCV(self.anomaly_detection, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\ad.csv", index=False)
        print(grid.best_params_)
        # Take best model
        self.anomaly_detection = grid.best_estimator_
        # Save the model
        with open("models\\ad.pkl", 'wb') as file:
            pickle.dump(self.anomaly_detection, file)
        # Fit ensemble classifier
        self.classifier.fit(X, y, stacked)
        return self

    def predict(self, X, voting):
        """
        Perform a classification on samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use

        Returns
        -------
        y_pred_bin: array, shape (n_samples,)
            Combined binary predictions
        y_pred: array, shape (n_samples,)
            Predictions from ensemble with suggested class labels
        """
        # Class predictions of ensemble
        y_pred, y_pred_ens = self.classifier.predict(X, voting=voting)
        # Binary predictions of anomaly detector
        y_pred_ad = self.anomaly_detection.predict(X)
        # Save individual predictions
        y_pred_indiv = np.column_stack((y_pred_ens, y_pred_ad)).astype(int)
        df_results = pd.DataFrame(y_pred_indiv,
                                  columns=['y_pred_dtc', 'y_pred_svc', 'y_pred_cnn', 'y_pred_ad'])
        df_results.to_csv("results\\y_pred_indiv.csv", index=False)
        # Overwrite the entries in y_pred_ad with positive, where ensemble decides positive
        y_pred_bin = np.where(y_pred != 0, 1, y_pred_ad)
        return y_pred_bin, y_pred


class AnomalyDetection(ClassifierMixin, BaseEstimator):
    """
    Anomaly detection with 1-NN and automatic calculation of optimal threshold.
    """
    def __init__(self, n_clusters=200):
        self.knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, weights='uniform',
                                                  metric='euclidean', n_jobs=-1)
        self.d = None              # distance threshold, set in fit() via Youden index
        self.n_clusters = n_clusters

    def fit(self, X, y):
        """
        Fit the algorithm according to the given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.

        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection knn over k-means centroids of the good class
        X_good = X[np.where(y == 0)]
        X_bad = X[np.where(y != 0)]
        km = TimeSeriesKMeans(n_clusters=self.n_clusters, metric="euclidean",
                              max_iter=100, random_state=0, n_jobs=-1).fit(X_good)
        self.knn.fit(km.cluster_centers_, np.zeros((self.n_clusters,)))
        # Calculate distances to all samples in good and bad
        d_bad, _ = self.knn.kneighbors(X_bad)
        d_good, _ = self.knn.kneighbors(X_good)
        # Calculate ROC over the nearest-centroid distance as anomaly score
        y_true = np.hstack((np.zeros(X_good.shape[0]), np.ones(X_bad.shape[0])))
        y_score = np.vstack((d_good, d_bad))
        fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=1)
        # Determine d by Youden index (maximises TPR - FPR)
        self.d = thresholds[np.argmax(tpr - fpr)]
        return self

    def predict(self, X):
        """
        Perform a classification on samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.

        Returns
        -------
        y_pred: array, shape (n_samples,)
            Predictions (0 = normal, 1 = anomalous)
        """
        # Binary predictions of anomaly detector: anomalous iff distance >= d
        y_pred = np.squeeze(np.where(self.knn.kneighbors(X)[0] < self.d, 0, 1))
        return y_pred


class Classifier(ClassifierMixin):
    """
    Classifier part with ensemble of estimators (DTC, SVC, CNN) and optional
    stacked meta classifiers trained on their out-of-fold predictions.
    """
    def __init__(self):
        # DTC pipeline
        featuriser = Featuriser()
        dtc = DecisionTreeClassifier()
        self.dtc_pipe = Pipeline([('featuriser', featuriser), ('dtc', dtc)])
        # SVC pipeline
        scaler = TimeSeriesScalerMeanVariance(kind='constant')
        flattener = Flattener()
        svc = SVC()
        self.svc_pipe = Pipeline([('scaler', scaler), ('flattener', flattener), ('svc', svc)])
        # Keras pipeline
        #len_filter = round(len_input*0.05)
        #num_filter = 8
        cnn = KerasClassifier(build_fn=build_cnn, epochs=100, verbose=0)
        self.cnn_pipe = Pipeline([('scaler', scaler), ('cnn', cnn)])
        # Meta classifier
        self.meta_dtc = DecisionTreeClassifier()
        self.meta_svc = SVC()

    def fit(self, X, y, stacked):
        """
        Fit each individual estimator of the ensemble model according to the
        given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the
            level 1 classifiers

        Returns
        -------
        self: object
            Fitted model
        """
        # Fit DTC
        # Do GridSearch to get best model
        param_grid = {'featuriser__windows': [1, 2, 3, 4, 5, 6],
                      'dtc__max_depth': [3, 4, 5],
                      'dtc__criterion': ['gini', 'entropy']}
        grid = GridSearchCV(self.dtc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\dtc.csv", index=False)
        print(grid.best_params_)
        # Take best model
        self.dtc_pipe = grid.best_estimator_
        # Plot the dtc
        #plot_dtc(self.dtc_pipe['dtc'])
        # Save the model
        with open("models\\dtc_pipe.pkl", 'wb') as file:
            pickle.dump(self.dtc_pipe, file)

        # Fit SVC
        # Do GridSearch to get best model
        param_grid = {'svc__C': [10, 100, 1000, 10000],
                      'svc__gamma': [0.01, 0.001, 0.0001, 0.00001],
                      'svc__degree': [2, 3],
                      'svc__kernel': ['rbf', 'linear', 'poly']}
        grid = GridSearchCV(self.svc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\svc.csv", index=False)
        print(grid.best_params_)
        # Take best model
        self.svc_pipe = grid.best_estimator_
        # Save the model
        # BUGFIX: was pickling self.dtc_pipe into svc_pipe.pkl (copy-paste
        # error); the saved file never contained the fitted SVC pipeline.
        with open("models\\svc_pipe.pkl", 'wb') as file:
            pickle.dump(self.svc_pipe, file)

        # Fit CNN
        # Do GridSearch to get best model
        param_grid = {'cnn__num_channels': [X.shape[2]],
                      'cnn__len_input': [X.shape[1]],
                      'cnn__num_classes': [np.unique(y).shape[0]],
                      'cnn__batch_size': [20, 30],
                      'cnn__num_filter': [4, 8, 16],
                      'cnn__num_layer': [1, 2],
                      'cnn__len_filter': [0.05, 0.1, 0.2]}  # len_filter is defined as fraction of input_len
        grid = GridSearchCV(self.cnn_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\cnn.csv", index=False)
        print(grid.best_params_)
        # Take best model
        self.cnn_pipe = grid.best_estimator_
        # Save the model
        self.cnn_pipe['cnn'].model.save("models\\cnn.h5")

        # Fit the Metaclassifiers
        if stacked:
            # Get level 1 classifier predictions as training data
            X_stacked, y_stacked = kfoldcrossval(self, X, y, k=5)
            # Fit Meta DTC
            self.meta_dtc.fit(X_stacked, y_stacked)
            # Save the model
            with open("models\\meta_dtc.pkl", 'wb') as file:
                pickle.dump(self.meta_dtc, file)
            # Fit Meta SVC
            self.meta_svc.fit(X_stacked, y_stacked)
            # Save the model
            with open("models\\meta_svc.pkl", 'wb') as file:
                pickle.dump(self.meta_svc, file)
        return self

    def predict(self, X, voting='veto'):
        """
        Perform a classification on samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use ('veto', 'democratic', 'meta_dtc', 'meta_svc')

        Returns
        -------
        y_pred: array, shape (n_samples,)
            Predictions
        y_pred_ens: array, shape (n_samples, 3)
            Predictions of the individual estimators
        """
        y_pred = np.empty(np.shape(X)[0])
        # Parallelize this part
        y_dtc = self.dtc_pipe.predict(X)
        y_svc = self.svc_pipe.predict(X)
        y_cnn = self.cnn_pipe.predict(X)
        y_pred_ens = np.stack([y_dtc, y_svc, y_cnn], axis=1).astype(int)
        if voting == 'veto':
            # Unanimous vote required; disagreement yields -1 (undecided).
            for i in range(np.shape(X)[0]):
                if y_dtc[i] == y_svc[i] == y_cnn[i]:
                    y_pred[i] = y_dtc[i]
                else:
                    y_pred[i] = -1
        if voting == 'democratic':
            # Majority vote per sample.
            for i in range(np.shape(X)[0]):
                y_pred[i] = np.argmax(np.bincount(y_pred_ens[i, :]))
        if voting == 'meta_dtc':
            y_pred = self.meta_dtc.predict(y_pred_ens)
        if voting == 'meta_svc':
            y_pred = self.meta_svc.predict(y_pred_ens)
        return y_pred, y_pred_ens


def kfoldcrossval(model, X, y, k=5):
    """
    Performs another cross-validation with the optimal models in order to get
    the level 1 predictions to train the meta classifier.

    Parameters
    ----------
    model: object
        Ensemble classifier object
    X : array-like of shape (n_samples, n_features, n_channels)
        Samples.
    y : array-like of shape (n_samples,)
        True labels for X.
    k: int
        Number of splits

    Returns
    -------
    X_stack: array-like of shape (n_samples, n_features)
        Level 1 predictions as training data for metaclassifier
    y_stack: array-like of shape (n_samples,)
        Targets for metaclassifier
    """
    kfold = StratifiedKFold(n_splits=k, shuffle=True, random_state=42)
    X_stack = np.empty((0, 3))
    y_stack = np.empty((0,))
    # Make a copy of the already fitted classifiers (to not overwrite the originals)
    dtc_temp = clone(model.dtc_pipe)
    svc_temp = clone(model.svc_pipe)
    cnn_temp = clone(model.cnn_pipe)
    # Train classifiers again in kfold crossvalidation to get level 1 predictions
    for train, test in kfold.split(X, y):
        # Train all models on train
        dtc_temp.fit(X[train], y[train])
        svc_temp.fit(X[train], y[train])
        cnn_temp.fit(X[train], y[train])
        # Test all on test
        y0 = dtc_temp.predict(X[test])
        y1 = svc_temp.predict(X[test])
        y2 = cnn_temp.predict(X[test])
        # Concatenate predictions of individual classifier
        a = np.stack((y0, y1, y2), axis=-1).astype(int)
        # Concatenate with predictions from other splits
        X_stack = np.vstack((X_stack, a))
        y_stack = np.hstack((y_stack, y[test]))
    return X_stack, y_stack


def build_cnn(num_filter, len_filter, num_layer, num_channels, len_input, num_classes):
    """
    Function returning a keras model.

    Parameters
    ----------
    num_filter: int
        Number of filters / kernels in the conv layer
    len_filter: float
        Length of the filters / kernels in the conv layer as fraction of inputlength
    num_layer: int
        Number of convlutional layers in the model
    num_channels: int
        Number of channels of the input
    len_input: int
        Number of dimensions of the input
    num_classes: int
        Number of classes in the dataset = Number of outputs

    Returns
    -------
    model: sequential keras model
        Keras CNN model ready to be trained
    """
    model = Sequential()
    # First Conv Layer
    model.add(Conv1D(filters=num_filter, kernel_size=int(len_filter*len_input), strides=1,
                     padding="same", activation='relu', input_shape=(len_input, num_channels),
                     name='block1_conv1'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block1_pool'))
    # Other Conv Layers (filter count grows with depth)
    for l in range(2, num_layer + 1):
        model.add(Conv1D(filters=num_filter*l, kernel_size=int(len_filter * len_input), strides=1,
                         padding="same", activation='relu', name='block' + str(l) + '_conv1'))
        model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block' + str(l) + '_pool'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(100, activation='relu', name='fc1'))
    model.add(Dense(num_classes, activation='softmax', name='predictions'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    plot_model(model, dpi=300, show_shapes=True, to_file='models\\cnn.png')
    return model
/src/cam.py
import tensorflow.keras.backend as K
import tensorflow.keras
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model, load_model
# grad_cam relies on graph-mode K.function / tf.gradients.
tensorflow.compat.v1.disable_eager_execution()
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from utils import to_time_series_dataset, split_df, load_test, TimeSeriesResampler, TimeSeriesScalerMeanVariance
from scipy.interpolate import interp1d
import seaborn as sns

sns.set(style='white', font='Palatino Linotype', font_scale=1, rc={'axes.grid': False})


def get_model(id):
    """Load the trained CAM-CNN for dataset *id* from the models folder."""
    # NOTE(review): parameter shadows the builtin `id`; name kept so
    # keyword callers stay compatible.
    model = load_model('.\\models\\cam_cnn_' + id + '.h5')
    return model


def target_category_loss(x, category_index, nb_classes):
    """Mask predictions so only the target category contributes to the loss."""
    return tf.multiply(x, K.one_hot([category_index], nb_classes))


def target_category_loss_output_shape(input_shape):
    """Output shape of the masking Lambda layer (unchanged)."""
    return input_shape


def normalize(x):
    """Normalize a tensor by its L2 norm (epsilon guards division by zero)."""
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)


def load_data(dataset):
    """Load one of the known datasets, resampled to its predefined length.

    Parameters
    ----------
    dataset : str
        One of 'test', 'uc1' or 'uc2'.

    Returns
    -------
    X : numpy.ndarray of shape (n_series, sz, n_channels)
    y : numpy.ndarray of shape (n_series,)

    Raises
    ------
    ValueError
        If *dataset* is not a known identifier (previously this fell
        through to an opaque NameError on `sz`).
    """
    if dataset == 'test':
        X, y = load_test()
        # Length of timeseries for resampler and cnn
        sz = 230
    elif dataset == 'uc1':
        X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        sz = 38
    elif dataset == 'uc2':
        X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        sz = 200
    else:
        raise ValueError("Unknown dataset: %r" % (dataset,))
    resampler = TimeSeriesResampler(sz=sz)
    X = resampler.fit_transform(X, y)
    y = np.array(y)
    return X, y


def get_sample(X, y, label, rs=100):
    """Pick one random sample of class *label*.

    Returns both the standardized series (CNN input) and the raw series
    (for plotting in original physical units).
    """
    s = np.random.RandomState(rs)
    s = s.choice(np.where(y == label)[0], 1)
    x_raw = to_time_series_dataset(X[s, :, :])
    # Scaler is fit on the whole dataset so the sample is scaled the same
    # way as during training.
    scaler = TimeSeriesScalerMeanVariance(kind='constant')
    X = scaler.fit_transform(X)
    x_proc = to_time_series_dataset(X[s, :, :])
    return x_proc, x_raw


def _compute_gradients(tensor, var_list):
    """tf.gradients that substitutes zeros for unconnected gradients."""
    grads = tf.gradients(tensor, var_list)
    return [grad if grad is not None else tf.zeros_like(var)
            for var, grad in zip(var_list, grads)]


def grad_cam(input_model, data, category_index, nb_classes, layer_name):
    """Compute the Grad-CAM activation map of *input_model* for one sample.

    Parameters
    ----------
    input_model : keras Model
    data : numpy.ndarray
        Single preprocessed sample, shape (1, n_timesteps, n_channels).
    category_index : int
        Class whose activation map is computed.
    nb_classes : int
        Total number of classes.
    layer_name : str
        Name of the convolutional layer to attribute against.

    Returns
    -------
    numpy.ndarray of shape (n_timesteps,)
        Non-negative activation map, interpolated to the input resolution.
    """
    # Lambda function for getting target category loss
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    # Lambda layer for function
    x = Lambda(target_layer, output_shape=target_category_loss_output_shape)(input_model.output)
    # Add Lambda layer as output to model
    model = Model(inputs=input_model.input, outputs=x)
    # Function for getting target category loss y^c
    loss = K.sum(model.output)
    # Get the layer with "layer_name" as name
    conv_output = [l for l in model.layers if l.name == layer_name][0].output
    # Define function to calculate gradients
    grads = normalize(_compute_gradients(loss, [conv_output])[0])
    gradient_function = K.function([model.input], [conv_output, grads])
    # Calculate convolution layer output and gradients for datasample
    output, grads_val = gradient_function([data])
    output, grads_val = output[0, :], grads_val[0, :, :]
    # Calculate the neuron importance weights as mean of gradients
    weights = np.mean(grads_val, axis=0)
    # Calculate CAM by multiplying weights with the respective output
    cam = np.zeros(output.shape[0:1], dtype=np.float32)
    for i, w in enumerate(weights):
        cam += w * output[:, i]
    # Interpolate CAM to get it back to the original data resolution
    f = interp1d(np.linspace(0, 1, cam.shape[0]), cam, kind="slinear")
    cam = f(np.linspace(0, 1, data.shape[1]))
    # Apply ReLU function to only get positive values
    cam[cam < 0] = 0
    return cam


def plot_grad_cam(cam, raw_input, cmap, alpha, language='eng'):
    """Overlay the activation map on each raw input channel.

    Assumes raw_input has shape (1, n_timesteps, n_channels) with at
    least two channels (a single channel would make `ax` a scalar Axes).
    """
    fig, ax = plt.subplots(raw_input.shape[-1], 1, figsize=(15, 9), sharex=True)
    if language == 'eng':
        ax_ylabel = [r"Position $\mathit{z}$ in mm",
                     r"Velocity $\mathit{v}$ in m/s",
                     r"Current $\mathit{I}$ in A"]
    if language == 'ger':
        ax_ylabel = [r"Position $\mathit{z}$ in mm",
                     r"Geschwindigkeit $\mathit{v}$ in m/s",
                     r"Stromstärke $\mathit{I}$ in A"]
    for i, a in enumerate(ax):
        left, right = (-1, raw_input.shape[1] + 1)
        range_input = raw_input[:, :, i].max() - raw_input[:, :, i].min()
        # Pad the y-limits by 10% of the channel's value range.
        down, up = (raw_input[:, :, i].min() - 0.1 * range_input,
                    raw_input[:, :, i].max() + 0.1 * range_input)
        a.set_xlim(left, right)
        a.set_ylim(down, up)
        a.set_ylabel(ax_ylabel[i])
        # The CAM is drawn as a 1-pixel-high image stretched over the axes.
        im = a.imshow(cam.reshape(1, -1), extent=[left, right, down, up],
                      aspect='auto', alpha=alpha, cmap=cmap)
        a.plot(raw_input[0, :, i], linewidth=2, color='k')
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar = fig.colorbar(im, cax=cbar_ax)
    if language == 'eng':
        cbar_ax.set_ylabel('Activation', rotation=90, labelpad=15)
    if language == 'ger':
        cbar_ax.set_ylabel('Aktivierung', rotation=90, labelpad=15)
    return ax


if __name__ == "__main__":
    X, y = load_data('test')
    nb_classes = np.unique(y).shape[0]
    # Load model and datasample
    preprocessed_input, raw_input = get_sample(X, y, label=1)
    model = get_model('test')
    # Get prediction
    predictions = model.predict(preprocessed_input)
    predicted_class = np.argmax(predictions)
    print('Predicted class: ', predicted_class)
    # Calculate Class Activation Map
    cam = grad_cam(model, preprocessed_input, predicted_class, nb_classes, 'block2_conv1')
    ax = plot_grad_cam(cam, raw_input, 'jet', 1)
    plt.show()
/src/gridsearch_results.py
import pandas as pd import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm def univariant(df, param, quantity='mean_test_score'): unique = df[param].unique() scores = [] for i in unique: scores.append(df[df[param] == i][quantity].mean()) plt.plot(unique, scores) plt.show() def multivariant(df, param1, param2,quantity='mean_test_score'): unique1 = df[param1].unique() unique2 = df[param2].unique() unique1, unique2 = np.meshgrid(unique1, unique2) scores = np.zeros(unique1.shape) for i, p1 in enumerate(unique1[0]): for j, p2 in enumerate(unique2[0]): scores[i, j] = df[(df[param1] == p1) & (df[param2] == p2)][quantity].values.mean() fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(unique1, unique2, scores, cmap=cm.coolwarm, linewidth=0, antialiased=False) ax.set_xlabel(param1) ax.set_ylabel(param2) ax.set_zlabel("Accuracy") plt.show() df = pd.read_csv("..\\results\\cnn.csv") univariant(df, param='param_cnn__len_filter',quantity='mean_score_time')
/src/main.py
import numpy as np import pandas as pd from utils import split_df, TimeSeriesResampler, plot_confusion_matrix, Differentiator from alpaca import Alpaca from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt if __name__ == "__main__": """ IMPORT YOUR DATA HERE X, y = DEFINE RESAMPLING LENGTH IF NEEDED sz = """ # Turn y to numpy array y = np.array(y) # Split into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42) # Pipeline example alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)),('alpaca', Alpaca())]) alpaca.fit(X_train, y_train) """ # Example with additional channel derived from channel 0 alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)), ('differentiator',Differentiator(channel=0)), ('alpaca', Alpaca())]) """ y_pred_bin_veto, y_pred_veto = alpaca.predict(X_test, voting="veto") y_pred_bin_dem, y_pred_dem = alpaca.predict(X_test, voting="democratic") y_pred_bin_meta_dtc, y_pred_meta_dtc = alpaca.predict(X_test, voting="meta_dtc") y_pred_bin_meta_svc, y_pred_meta_svc = alpaca.predict(X_test, voting="meta_svc") # Store all results in a dataframe y_pred_indiv = np.column_stack((y_pred_bin_veto, y_pred_veto,y_pred_bin_dem, y_pred_dem, y_pred_bin_meta_dtc, y_pred_meta_dtc, y_pred_bin_meta_svc, y_pred_meta_svc, y_test)).astype(int) df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_bin_veto', 'y_pred_veto','y_pred_bin_dem', 'y_pred_dem', 'y_pred_bin_meta_dtc','y_pred_meta_dtc', 'y_pred_bin_meta_svc', 'y_pred_meta_svc', 'y_true']) df_results.to_csv("results\\y_pred_total.csv",index=False) print("TEST FINISHED SUCCESSFULLY")
/src/test_time.py
from alpaca import Alpaca
from utils import to_time_series_dataset, to_dataset, split_df, TimeSeriesResampler
import time
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline

# Largest batch size to benchmark.
max_sample = 20

for dataset in ['uc2']:
    # Dataset-specific loading and metadata.
    if dataset == 'uc1':
        X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        y = np.array(y)
        sz = 38                              # resampling length
        num_channels = len(X[0][0])          # channels per time step
        num_classes = np.unique(y).shape[0]  # distinct labels
    if dataset == 'uc2':
        X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        y = np.array(y)
        sz = 200
        num_channels = len(X[0][0])
        num_classes = np.unique(y).shape[0]

    # Fit the full pipeline once; the benchmark only measures prediction.
    resampler = TimeSeriesResampler(sz=sz)
    alpaca = Pipeline([('resampler', resampler), ('classifier', Alpaca())])
    alpaca.fit(X, y, classifier__stacked=False, classifier__n_clusters=200)

    rows = []

    # Scenario 1: predict i samples one-by-one, averaged over 100 runs,
    # 10 random draws per sample count.
    for batch_size in range(1, max_sample + 1):
        for _draw in range(10):
            start_idx = np.random.randint(2000)
            sample = np.transpose(to_time_series_dataset(X[start_idx]), (2, 0, 1))
            t0 = time.process_time()
            for _rep in range(100):
                for _single in range(batch_size):
                    y_pred_bin, y_pred = alpaca.predict(sample, voting='veto')
            t1 = time.process_time()
            rows.append([batch_size, (t1 - t0) / 100, 'single'])

    # Scenario 2: predict the same i samples as one batch.
    for batch_size in range(1, max_sample + 1):
        for _draw in range(10):
            start_idx = np.random.randint(2000)
            if batch_size == 1:
                sample = np.transpose(to_time_series_dataset(X[start_idx]), (2, 0, 1))
            else:
                sample = to_dataset(X[start_idx:start_idx + batch_size])
            t0 = time.process_time()
            for _rep in range(100):
                y_pred_bin, y_pred = alpaca.predict(sample, voting='veto')
            t1 = time.process_time()
            rows.append([batch_size, (t1 - t0) / 100, 'batch'])

    df = pd.DataFrame(rows, columns=['Sample Number', 'Time', 'Type'])
    df.to_csv("..\\results\\Time_" + dataset + ".csv")
/src/test_use_case.py
from alpaca import Alpaca
from utils import to_time_series_dataset, split_df, TimeSeriesResampler, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.pipeline import Pipeline
import time
import numpy as np
import pandas as pd

# Number of repeated runs per (dataset, resampling length).
repetitions = 2

if __name__ == "__main__":
    for dataset in ['uc1']:
        print("Dataset: ", dataset)
        results = []

        # Dataset-specific loading; `sz` is the list of resampling lengths to test.
        if dataset == 'uc1':
            X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
                            index_column='run_id',
                            feature_columns=['fldPosition', 'fldCurrent'],
                            target_name='target')
            sz = [38, 41]
            num_channels = len(X[0][0])
            num_classes = np.unique(y).shape[0]
        elif dataset == 'uc2':
            X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
                            index_column='run_id',
                            feature_columns=['position', 'force'],
                            target_name='label')
            sz = [200]
            num_channels = len(X[0][0])
            num_classes = np.unique(y).shape[0]

        for r in range(repetitions):
            print("Repetition #", r)
            for s in sz:
                print("Resampling size:", s)
                t_start = time.time()

                # Reshuffle per run (Keras sees ordered batches otherwise).
                X, y = shuffle(X, y, random_state=r)
                y = np.array(y)
                X_train, X_test, y_train, y_test = train_test_split(
                    X, y, test_size=0.3, stratify=y, random_state=r)

                alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=s)),
                                   ('classifier', Alpaca())])
                alpaca.fit(X_train, y_train,
                           classifier__stacked=False,
                           classifier__n_clusters=200)

                y_pred_bin, y_pred = alpaca.predict(X_test, voting="veto")

                # Binary ground truth: any non-zero class counts as faulty.
                y_test_bin = np.copy(y_test)
                y_test_bin[y_test_bin > 0] = 1

                # Metrics of the combined system (anomaly detector + ensemble).
                tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_bin).ravel()
                results.append([s, r, 'err_bin', (fp + fn) / (tn + fp + fn + tp)])
                results.append([s, r, 'fnr_bin', fn / (fn + tp)])
                results.append([s, r, 'fpr_bin', fp / (fp + tn)])

                # Metrics of the ensemble alone (multi-class output binarised).
                y_pred_clf = np.copy(y_pred)
                y_pred_clf[y_pred_clf != 0] = 1
                tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_clf).ravel()
                results.append([s, r, 'err_ens', (fp + fn) / (tn + fp + fn + tp)])
                results.append([s, r, 'fnr_ens', fn / (fn + tp)])
                results.append([s, r, 'fpr_ens', fp / (fp + tn)])

                t_end = time.time()
                print("Substest finished, duration ", (t_end - t_start))

        # Persist all metric rows for this dataset.
        df = pd.DataFrame(results, columns=['resampling', 'test', 'metric', 'value'])
        df.to_csv("..\\results\\Test" + dataset + ".csv")
/src/test_voting.py
from alpaca import Alpaca
from utils import load_test, split_df, TimeSeriesResampler, confusion_matrix
import time
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.pipeline import Pipeline
import numpy as np
import pandas as pd

if __name__ == '__main__':
    X, y = load_test()
    # Length of timeseries for resampler and cnn.
    sz = 230
    num_channels = X.shape[-1]
    num_classes = np.unique(y).shape[0]
    classes = np.array(["0", "1", "2", "3", "4", "?"])
    repetitions = 1

    results = []
    # One row per misclassified sample: 3 flattened channels + 4 labels + scheme.
    outliers = np.empty((0, 230 * 3 + 5))

    for r in range(repetitions):
        print("Repetition #", r)
        X, y = shuffle(X, y, random_state=r)
        y = np.array(y)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, stratify=y, random_state=r)

        for scheme in ["democratic", "veto", "stacked_svc", "stacked_dtc"]:
            # Map the scheme name onto the predict() arguments.
            if scheme in ('stacked_svc', 'stacked_dtc'):
                stacked = True
                voting = 'stacked'
                meta = scheme.rsplit('_', 1)[1]
            else:
                stacked = False
                voting = scheme
                meta = None

            # Fresh pipeline per scheme so fits are independent.
            resampler = TimeSeriesResampler(sz=sz)
            alpaca = Pipeline([('resampler', resampler), ('classifier', Alpaca())])
            alpaca.fit(X_train, y_train,
                       classifier__stacked=stacked,
                       classifier__n_clusters=100)
            y_pred_bin, y_pred = alpaca.predict(X_test, voting=voting)

            # Binarised ground truth: any non-zero class is a fault.
            y_test_bin = np.copy(y_test)
            y_test_bin[y_test_bin > 0] = 1

            tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_bin).ravel()
            results.append([scheme, r, 'err', (fp + fn) / (tn + fp + fn + tp)])
            results.append([scheme, r, 'fnr', fn / (fn + tp)])
            results.append([scheme, r, 'fpr', fp / (fp + tn)])

            # Archive each misclassified sample with its labels and scheme.
            idx = np.where(y_test_bin != y_pred_bin)
            curves = X_test[idx].transpose(0, 2, 1).reshape(X_test[idx].shape[0], -1)
            vote_type = np.array([scheme for i in range(idx[0].shape[0])]).reshape((-1, 1))
            wrong = np.hstack([curves,
                               y_pred[idx].reshape((-1, 1)),
                               y_test[idx].reshape((-1, 1)),
                               y_pred_bin[idx].reshape((-1, 1)),
                               y_test_bin[idx].reshape((-1, 1)),
                               vote_type])
            outliers = np.vstack((outliers, wrong))

    df = pd.DataFrame(outliers)
    df.to_csv("..\\results\\OutliersVotingTest.csv")
    df = pd.DataFrame(results, columns=['voting', 'test', 'metric', 'value'])
    df.to_csv("..\\results\\VotingTest.csv")
/src/utils.py
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from scipy.stats import kurtosis, skew
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn import tree
import graphviz


def load_test():
    """Load the testbench dataset.

    Returns
    -------
    X : numpy.ndarray of shape (n_samples, n_timesteps, 3)
        Channels: position, velocity, current.
    y : numpy.ndarray of shape (n_samples,)
        One target label per sample.
    """
    df = pd.read_pickle('data\\df_test.pkl')
    pivoted = df.pivot(index='sample_nr', columns='idx')
    X = np.stack([pivoted['position'].values,
                  pivoted['velocity'].values,
                  pivoted['current'].values], axis=2)
    y = df.groupby('sample_nr').target.first().values
    return X, y


def load_data(dataset):
    """Load any known dataset, resampled to its predefined length.

    Parameters
    ----------
    dataset : str
        One of 'test', 'uc1' or 'uc2'.

    Raises
    ------
    ValueError
        For unknown identifiers (previously fell through to a NameError).
    """
    if dataset == 'test':
        X, y = load_test()
        sz = 230
    elif dataset == 'uc1':
        X, y = split_df(pd.read_pickle('data\\df_uc1.pkl'), index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        sz = 38
    elif dataset == 'uc2':
        X, y = split_df(pd.read_pickle('data\\df_uc2.pkl'), index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        sz = 200
    else:
        raise ValueError("Unknown dataset: %r" % (dataset,))
    resampler = TimeSeriesResampler(sz=sz)
    X = resampler.fit_transform(X, y)
    y = np.array(y)
    return X, y


def split_df(df, index_column, feature_columns, target_name):
    """Split a long-format dataframe into per-run feature lists and labels.

    Returns (features, labels) where features is a list of
    (n_timesteps, n_features) nested lists, one per unique index value.
    """
    labels = []
    features = []
    for id_, group in df.groupby(index_column):
        features.append(group[feature_columns].values.tolist())
        labels.append(group[target_name].iloc[0])
    return features, labels


def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None,
                          cmap=plt.cm.Blues):
    """Plot the confusion matrix of y_true vs y_pred.

    Fix: the normalization step was commented out, so ``normalize=True``
    only changed the number format; it now actually row-normalizes.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # We want to show all ticks and label them with the class names.
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           ylabel='True label',
           xlabel='Predicted label')
    # Matplotlib 3.1.1 bug workaround
    ax.set_ylim(len(cm) - 0.5, -0.5)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax


def to_time_series_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features)

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    numpy.ndarray of shape (no_time_series, no_time_samples, no_features)
    """
    assert len(dataset) != 0, 'dataset is empty'
    # `np.float` was a deprecated alias for the builtin `float`
    # (removed in NumPy 1.24); use `float` directly.
    try:
        np.array(dataset, dtype=float)
    except ValueError:
        raise AssertionError('All elements must have the same length.')
    if np.array(dataset[0]).ndim == 0:
        dataset = [dataset]
    if np.array(dataset[0]).ndim == 1:
        no_time_samples = len(dataset[0])
        no_features = 1
    else:
        no_time_samples, no_features = np.array(dataset[0]).shape
    return np.array(dataset, dtype=float).reshape(
        len(dataset), no_time_samples, no_features)


def to_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features) where no_time_samples
    for different time series can be different.

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    list of np.arrays (no_time_series, no_time_samples, no_features)
    """
    assert len(dataset) != 0, 'dataset is empty'
    if np.array(dataset[0]).ndim == 0:
        dataset = [[d] for d in dataset]
    if np.array(dataset[0]).ndim == 1:
        no_features = 1
        dataset = [[[d] for d in data] for data in dataset]
    else:
        no_features = len(dataset[0][0])
    for data in dataset:
        try:
            array = np.array(data, dtype=float)
        except ValueError:
            raise AssertionError(
                "All samples must have the same number of features!")
        assert array.shape[-1] == no_features,\
            'All series must have the same no features!'
    return dataset


class TimeSeriesResampler(TransformerMixin):
    """Resampler for time series.

    Resample time series so that they reach the target size.

    Parameters
    ----------
    sz : int
        Size of the output time series.
    """
    def __init__(self, sz):
        self._sz = sz

    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to fit.
        return self

    def _interp(self, x):
        # Linear interpolation of one channel onto the target grid.
        return np.interp(np.linspace(0, 1, self._sz),
                         np.linspace(0, 1, len(x)), x)

    def transform(self, X, **kwargs):
        X_ = to_dataset(X)
        res = [np.apply_along_axis(self._interp, 0, x) for x in X_]
        return to_time_series_dataset(res)


class TimeSeriesScalerMeanVariance(TransformerMixin):
    """Scaler for time series.

    Scales time series so that their mean (resp. standard deviation) in
    each dimension is mu (resp. std).  The statistics can either be
    constant (one value per feature over all times) or time varying (one
    value per time step per feature).

    Parameters
    ----------
    kind : str (one of 'constant' or 'time-varying')
    mu : float (default: 0.)
        Mean of the output time series.
    std : float (default: 1.)
        Standard deviation of the output time series.
    """
    def __init__(self, kind='constant', mu=0., std=1.):
        assert kind in ['time-varying', 'constant'],\
            'axis should be one of time-varying or constant'
        self._axis = (1, 0) if kind == 'constant' else 0
        self.mu_ = mu
        self.std_ = std

    def fit(self, X, y=None, **kwargs):
        # Learn per-feature (or per-timestep) statistics of the dataset.
        X_ = to_time_series_dataset(X)
        self.mean_t = np.mean(X_, axis=self._axis)
        self.std_t = np.std(X_, axis=self._axis)
        # Guard against division by zero on constant channels.
        self.std_t[self.std_t == 0.] = 1.
        return self

    def transform(self, X, **kwargs):
        """Rescale a time series dataset with the fitted statistics.

        Returns
        -------
        numpy.ndarray
            Rescaled time series dataset.
        """
        X_ = to_time_series_dataset(X)
        X_ = (X_ - self.mean_t) * self.std_ / self.std_t + self.mu_
        return X_


class Flattener(TransformerMixin):
    """Flattener for time series.

    Reduces the dataset by one dimension by flattening the channels."""
    def __init__(self):
        pass

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Flatten (n, t, c) to (n, c*t), channel-major per sample."""
        X_ = X.transpose(0, 2, 1).reshape(X.shape[0], -1)
        return X_


class Differentiator(TransformerMixin):
    """Calculates the derivative of a specified channel and appends it
    as a new channel."""
    def __init__(self, channel):
        """
        Parameters
        ----------
        channel : int
            Channel to calculate the derivative from.
        """
        self.channel = channel

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Append the first difference of the configured channel.

        Fix: `prepend` previously used the scalar ``X[0, 0, channel]``
        (the first value of sample 0) for *every* series, corrupting the
        first derivative value of all other samples.  Each series now
        prepends its own first value, so the first difference is 0 for
        every sample.
        """
        dt = np.diff(X[:, :, self.channel], axis=1,
                     prepend=X[:, :1, self.channel])
        X = np.concatenate((X, np.expand_dims(dt, axis=2)), axis=2)
        return X


class Featuriser(TransformerMixin, BaseEstimator):
    """Featuriser for time series.

    Calculates a set of statistical measures on each channel and each
    defined window of the dataset and returns a flattened matrix to
    train sklearn models on."""
    def __init__(self, windows=1):
        """
        Parameters
        ----------
        windows : int
            Number of windows to part the time series in.
        """
        self.windows = windows

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Return 6 statistics (mean, std, min, argmin, max, argmax)
        per channel and window, horizontally stacked."""
        X_ = np.empty((X.shape[0], 0))
        for i in range(X.shape[2]):
            for window in np.array_split(X[:, :, i], self.windows, axis=1):
                mean = np.mean(window, axis=1)
                std = np.std(window, axis=1)
                min_d = np.min(window, axis=1)
                min_loc = np.argmin(window, axis=1)
                max_d = np.max(window, axis=1)
                max_loc = np.argmax(window, axis=1)
                # Concatenate all values to a numpy array
                row = [mean, std, min_d, min_loc, max_d, max_loc]
                row = np.transpose(np.vstack(row))
                X_ = np.hstack([X_, row])
        return X_


class Featuriser2(TransformerMixin):
    """Deprecated.

    Featuriser for time series.  Calculates a set of statistical
    measures on each channel of the dataset and returns a flattened
    matrix to train sklearn models on."""
    def __init__(self):
        pass

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Return 14 global statistics per channel, horizontally stacked."""
        X_ = np.empty((X.shape[0], 0))
        for i in range(X.shape[2]):
            table = np.empty((0, 14))
            for x in X[:, :, i]:
                mean = np.mean(x)
                var = np.var(x)
                max_d = x.max()
                max_loc = np.argmax(x)
                min_d = x.min()
                min_loc = np.argmin(x)
                range_d = max_d - min_d
                med = np.median(x)
                first = x[0]
                last = x[-1]
                skew_d = skew(x)
                kurt = kurtosis(x)
                # Renamed from `sum`, which shadowed the builtin.
                total = np.sum(x)
                mean_abs_change = np.mean(np.abs(np.diff(x)))
                # Concatenate all values to a numpy array
                row = [mean, var, med, first, last, range_d, min_d, min_loc,
                       max_d, max_loc, skew_d, kurt, total, mean_abs_change]
                row = np.hstack(row)
                table = np.vstack([table, row])
            X_ = np.hstack((X_, table))
        return X_


class Cutter(TransformerMixin):
    """Cuts the last part of the curves."""
    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Truncate each series at the argmax of its first channel.

        Returns
        -------
        list
            Cut time series dataset.
        """
        res = []
        for x in X:
            idx = np.argmax(np.array(x)[:, 0])
            res.append(x[:idx])
        return res


def plot_dtc(dtc):
    """Render the fitted decision tree to models\\dtc.svg via graphviz.

    Feature names are synthesized to match the Featuriser layout
    (6 statistics per channel and window).
    """
    feature_names = []
    #channels = ["$pos","$vel","$cur"] # test case
    #channels = ["$pos","$cur"] # use case 1
    #channels = ["$pos","$cur","$vel"] # use case 1 with derived velocity
    channels = ["$pos", "$for"]  # use case 2
    for var in channels:
        for i in range(1, int((dtc.n_features_ / 6 / len(channels)) + 1)):
            for f in ["{mean}$", "{std}$", "{min}$", "{min-ind}$", "{max}$", "{max-ind}$"]:
                feature_names.append('{0}^{1}_{2}'.format(var, i, f))
    #target_names = ["0","1","2","3","4"] # test case
    target_names = ["0", "1", "2", "3"]  # use case 1 + 2
    dot_data = tree.export_graphviz(dtc, out_file=None,
                                    feature_names=feature_names,
                                    class_names=target_names,
                                    filled=False, rounded=True,
                                    special_characters=True)
    graph = graphviz.Source(dot_data)
    graph.format = 'svg'
    graph.render("models\\dtc")
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
gabilew/Joint-Forecasting-and-Interpolation-of-GS
refs/heads/master
{"/main/seattle_train_sggru_semisupervised.py": ["/data/Load_data.py", "/pytorch_gsp/utils/gsp.py", "/pytorch_gsp/train/train_rnn.py"], "/pytorch_gsp/models/sggru.py": ["/pytorch_gsp/utils/gsp.py"], "/data/Dataloader.py": ["/pytorch_gsp/utils/gsp.py"]}
└── ├── data │ ├── Dataloader.py │ └── Load_data.py ├── main │ ├── __init.py │ └── seattle_train_sggru_semisupervised.py ├── pytorch_gsp │ ├── models │ │ └── sggru.py │ ├── train │ │ └── train_rnn.py │ └── utils │ └── gsp.py └── setup.py
/data/Dataloader.py
"""Data preparation utilities: windowing, chronological splitting and
graph-signal sampling/GFT preprocessing for the forecasting models."""
import time
import numpy as np
import pandas as pd
import torch
import torch.utils.data as utils
from pytorch_gsp.utils.gsp import complement


def PrepareSequence(data, seq_len = 10, pred_len = 1):
    """Slice a time series into (history, target) training pairs.

    Each sequence is ``seq_len`` consecutive rows of ``data``; its label is the
    single row ``pred_len`` steps after the end of the sequence.

    Returns a pair of numpy arrays ``(sequences, labels)``.
    """
    time_len = data.shape[0]
    sequences, labels = [], []
    for i in range(time_len - seq_len - pred_len):
        sequences.append(data[i:i+seq_len])
        labels.append(data[i+seq_len+pred_len-1:i+seq_len+pred_len])
    return np.asarray(sequences), np.asarray(labels)


def SplitData(data, label = None, seq_len = 10, pred_len = 1, train_proportion = 0.7, valid_proportion = 0.2, shuffle = False):
    """Scale ``data`` to [0, 1] by its global max and split it chronologically.

    The validation/test splits start ``seq_len`` rows early so the first window
    of each split has a full history.

    NOTE(review): ``data`` is normalised *in place* (``data /= max_value``), so
    the caller's array is modified; ``shuffle`` and ``index`` are currently
    unused — confirm whether that is intended.

    Returns train/valid/test arrays (paired with labels when ``label`` is
    given) plus the scale factor ``max_value``.
    """
    max_value = np.max(data)
    data /= max_value
    samp_size = data.shape[0]
    if label is not None:
        assert(label.shape[0] == samp_size)
    index = np.arange(samp_size, dtype = int)
    train_index = int(np.floor(samp_size * train_proportion))
    valid_index = int(np.floor(samp_size * ( train_proportion + valid_proportion)))
    if label is not None:
        train_data, train_label = data[:train_index+pred_len-1], label[:train_index+pred_len-1]
        valid_data, valid_label = data[train_index-seq_len:valid_index+pred_len-1], label[train_index-seq_len:valid_index+pred_len-1]
        test_data, test_label = data[valid_index-seq_len:], label[valid_index-seq_len:]
        return (train_data, train_label), (valid_data, valid_label), (test_data, test_label), max_value
    else:
        train_data = data[:train_index+pred_len-1]
        valid_data = data[train_index-seq_len:valid_index+pred_len-1]
        test_data = data[valid_index-seq_len:]
        return train_data ,valid_data, test_data, max_value


def Dataloader(data, label, batch_size = 40, suffle = False):
    """Wrap (data, label) numpy arrays into a pytorch DataLoader.

    ``drop_last=True`` so every batch has exactly ``batch_size`` samples.
    (The ``suffle`` parameter name is kept as-is for caller compatibility.)
    """
    data, label = torch.Tensor(data), torch.Tensor(label )
    dataset = utils.TensorDataset(data, label)
    dataloader = utils.DataLoader(dataset, batch_size = batch_size, shuffle=suffle, drop_last = True)
    return dataloader


def Preprocessing_hop_interp(matrix, A ,sample):
    """Fill out-of-sample node signals by averaging their graph neighbours.

    Unknown columns are first set to the mean of the first 100 sampled rows,
    then each unknown node is overwritten, per time step, with the mean of its
    neighbours' values in the adjacency matrix ``A``.
    """
    unknown = complement(sample,matrix.shape[1])
    features_unknown = np.copy(matrix.values)
    features_unknown[:,unknown] = np.mean(matrix.values[:100,sample])
    for node in unknown:
        neighbors = np.nonzero(A[node])[0]
        for t in range(features_unknown.shape[0]):
            features_unknown[np.array([t]), np.array([node])] = np.mean(features_unknown[t, neighbors])
    return features_unknown


def MaxScaler(data):
    """Return ``(max_value, data / max_value)`` — max-normalisation."""
    max_value = np.max(data)
    return max_value, data/max_value


def Preprocessing_GFT(matrix,sample, V , freqs ):
    """Project sampled graph signals onto the selected Fourier modes.

    ``matrix`` holds the sampled signals (time in rows); the signals are
    zero-padded to the full vertex set, then transformed with the eigenvector
    columns ``V[:, freqs]``. Returns the spectral coefficients, time in rows.
    """
    x = matrix.T
    Vf = V[:, freqs]
    Psi = np.zeros((V.shape[0],x.shape[1]))
    Psi[sample] = x
    Tx = (Vf.T@Psi).T
    return Tx


class DataPipeline:
    def __init__(self, sample, V , freqs ,seq_len, pred_len, gft = True):
        """ DataPipeline: perform the sampling procedure on the graph signals
        and create the dataloader object
        Args:
            sample (np array): list of graph indices
            V (2D np array): Laplacian eigenvector matrix
            freqs (np array): list of frequency indices
            seq_len (int, optional): size of historical data. Defaults to 10.
            pred_len (int, optional): number of future samples. Defaults to 1.
            gft (bool, optional): if Fourier transform should be applied. Defaults to False.
        """
        self.sample = sample
        self.V = V
        self.freqs = freqs
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.gft = gft

    def fit(self,train_data,sample_label = True, batch_size=40, shuffle=True):
        """ fit: build dataloader for training data
        Args:
            train_data (numpy array): train data
            sample_label (bool, optional): If labels should be sampled for a
                semisupervised learning. Defaults to True.
            batch_size (int, optional): batch size. Defaults to 40.
            shuffle (bool, optional): If samples should be shuffled. Defaults to True.
        Returns:
            pytorch Dataloader: train data prepared for training
        """
        train_X, train_y = PrepareSequence(train_data, seq_len = self.seq_len, pred_len = self.pred_len)
        if self.gft:
            # Append the GFT coefficients of the sampled signals as extra
            # features alongside the sampled vertex-domain values.
            train_data_freqs = Preprocessing_GFT(train_data[:,self.sample],self.sample, self.V , self.freqs )
            train_X_freqs, _ = PrepareSequence(train_data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
            train_X = np.concatenate((train_X[:,:,self.sample], train_X_freqs), axis=-1)
        if sample_label:
            train_y = train_y.T[self.sample]
            train_y = train_y.T
        return Dataloader(train_X, train_y, batch_size, shuffle)

    def transform(self, data, sample_label = True, batch_size=40,shuffle=True):
        """ transform: build dataloader for validation and test data
        Args:
            data (numpy array): validation or test data
            sample_label (bool, optional): If validation labels should be sampled
                for a semisupervised learning. Defaults to True.
            batch_size (int, optional): batch size. Defaults to 40.
            shuffle (bool, optional): If samples should be shuffled. Defaults to True.
        Returns:
            pytorch Dataloader: data prepared for evaluation
        """
        X, y = PrepareSequence(data, seq_len = self.seq_len, pred_len = self.pred_len)
        if self.gft:
            data_freqs = Preprocessing_GFT(data[:,self.sample],self.sample, self.V , self.freqs)
            X_freqs, _ = PrepareSequence(data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
            X = np.concatenate((X[:,:,self.sample], X_freqs), axis=-1)
        if sample_label:
            y = y.T[self.sample]
            y = y.T
        return Dataloader(X, y, batch_size, shuffle)
/data/Load_data.py
"""Dataset loaders for the USA temperature and SeattleLoop graph datasets."""
import math
import sys
import time
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import rbf_kernel


def USA_data(directory ):
    """Load the USA temperature graph signals and adjacency matrix.

    TODO: include the GSOD dataset.

    Args:
        directory (str): directory containing ``Usa_temp.csv`` and
            ``Adjk10_07-13.npy``.

    Returns:
        signals: DataFrame of graph signals (time in rows, nodes in columns).
        A: adjacency matrix.
    """
    signals = pd.read_csv( directory + 'Usa_temp.csv')
    # Drop the spurious index column pandas writes on export.
    if "Unnamed: 0" in signals.columns:
        signals.drop(columns="Unnamed: 0", inplace = True)
    A = np.load( directory + 'Adjk10_07-13.npy')
    return signals, A


def Seattle_data(directory , binary=False):
    """ Seattle_data:
    https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb

    Args:
        directory (str): directory of the seattle loop detector dataset
        binary (bool, optional): if the adjacency matrix should stay binary;
            otherwise edges are reweighted with an RBF kernel on the signals.
            Defaults to False.

    Returns:
        speed_matrix: graph signals with time in the rows and nodes in the columns
        A: adjacency matrix
        FFR: free flow reachability matrices
    """
    speed_matrix = pd.read_pickle( directory + 'speed_matrix_2015',)
    A = np.load( directory + 'Loop_Seattle_2015_A.npy')
    if not binary:
        # Weight existing edges by signal similarity (RBF kernel over the
        # first 1000 time steps), normalise by the largest eigenvalue and
        # remove self-loops.
        cor = rbf_kernel(speed_matrix[:1000].T/10)
        A = cor*(A)
        e, V = np.linalg.eigh(A)
        A/=np.max(e)
        A = A-np.diag(A.diagonal())
    FFR_5min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_5min.npy')
    FFR_10min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_10min.npy')
    FFR_15min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_15min.npy')
    FFR_20min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_20min.npy')
    FFR_25min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_25min.npy')
    FFR = [FFR_5min, FFR_10min, FFR_15min, FFR_20min, FFR_25min]
    return speed_matrix, A, FFR
/main/__init.py
"""Package bootstrap: make the repository's ``data`` package importable."""
import os
import sys

# Directory containing this file, fully resolved (symlinks expanded).
_here = os.path.dirname(os.path.realpath(__file__))
# Repository root = parent of the ``main`` package directory.
current_dir = os.path.split(_here)[0]
# Expose the sibling ``data`` directory on the import path.
data_dir = os.path.join(current_dir, 'data')
sys.path.append(data_dir)
print(sys.path)
/main/seattle_train_sggru_semisupervised.py
"""Semi-supervised SG-GRU training on the SeattleLoop dataset."""
import os
import time
import torch
import argparse
import numpy as np
import pandas as pd

from data.Load_data import Seattle_data
from data.Dataloader import *
from pytorch_gsp.train.train_rnn import Evaluate, Train
from pytorch_gsp.utils.gsp import ( greedy_e_opt, spectral_components)
from pytorch_gsp.models.sggru import *


def n_params(model):
    """Return the total number of scalar parameters of ``model``."""
    params=[]
    for param in model.parameters():
        params.append(param.numel())
    return np.sum(params)


print(torch.__version__)


def training_routine(args):
    """Build the data pipeline, train the SG-GRU model and save the results.

    Args:
        args: parsed argparse namespace (see ``__main__`` below).

    Side effects: writes the trained model and loss curves under
    ``models_and_losses/``.
    """
    # Fix: ``torch.cuda.is_available`` must be *called* — the bare function
    # object is always truthy, so the CPU fallback could never trigger.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if args.device == 'cuda' and device == 'cpu':
        print("cuda is not available, device set to cpu")
    else:
        assert (args.device in ['cpu','cuda'])
        device = args.device
    lr = args.lr
    epochs = args.epochs
    seq_len = args.seq_len
    pred_len = args.pred_len
    patience = args.patience
    name = args.save_name
    speed_matrix, A, FFR = Seattle_data('data/Seattle_Loop_Dataset/') #put seattle Loop dataset in this directory
    N = speed_matrix.shape[1]
    S = int(args.sample_perc*N/100)
    if args.F_perc is None:
        F = int(S/3)
    else:
        F = int(args.F_perc*N/100)
    assert(S>F) # the sampling set must be larger than the spectral support
    #compute gft
    F_list, V = spectral_components(A,np.array(speed_matrix)[:1000] )
    if args.supervised:
        freqs = F_list[:F]
    else:
        freqs = np.arange(0,F,1)
    if args.e_opt:
        print("Using e-optimal greedy algorithm")
        if args.sample_perc == 25:
            sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt25.npy')[0]
        elif args.sample_perc == 50:
            sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt50.npy')[0]
        elif args.sample_perc == 75:
            sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt75.npy')[0]
        else:
            # Fix: this branch referenced the undefined name ``Fs`` and raised
            # a NameError; the spectral support computed above is ``freqs``.
            sample = greedy_e_opt(V[:,freqs],S)
    else:
        sample = np.sort(np.random.choice(np.arange(N), S, replace = False))
    S = len(sample)
    pre_time = time.time()
    # NOTE: SplitData scales speed_matrix.values in place and splits 70/20/10.
    train, valid, test, max_value = SplitData(speed_matrix.values, label = None,
                                              seq_len = 10, pred_len = 1,
                                              train_proportion = 0.7,
                                              valid_proportion = 0.2,
                                              shuffle = False)
    pipeline = DataPipeline(sample,V,freqs,seq_len,pred_len)
    train_dataloader = pipeline.fit(train)
    valid_dataloader = pipeline.transform(valid)
    # Test labels are kept on the full vertex set (sample_label=False) and the
    # whole test split is evaluated as a single batch.
    test_dataloader = pipeline.transform(test, sample_label=False,
                                         batch_size = test.shape[0]-seq_len-pred_len,
                                         shuffle=False)
    print("Preprocessing time:", time.time()-pre_time)
    layer = SpectralGraphForecast(V, sample,freqs, rnn = 'gru')
    if args.supervised:
        sggru = model(V,sample,freqs, layer,l1=0,l2=0.0,supervised = True).to(device)
    else:
        sggru = model(V,sample,freqs, layer,l1=0,l2=0.0,supervised = False).to(device)
    pre_time = time.time()
    print("Total number of nodes: {}".format(N))
    print("Sample size: {}".format(S))
    print("Spectral sample size: {}".format(F))
    print("Initial learning rate: {}".format(lr))
    sggru, sggru_loss = Train(sggru ,train_dataloader, valid_dataloader,
                              epochs = epochs, learning_rate = lr,
                              patience=patience ,sample = sample)
    print("Training time:", time.time()-pre_time)
    pre_time = time.time()
    sggru_test = Evaluate(sggru.to(device), test_dataloader, max_value )
    print("Test time:", time.time()-pre_time)
    name = 'sggru'
    loss = (sggru_loss,sggru_test)
    os.makedirs("models_and_losses/", exist_ok=True)
    torch.save(sggru, "models_and_losses/{}.pt".format(name))
    np.save("models_and_losses/{}.npy".format(name),loss)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Semi-Supervised Prediction\n SeattleLoop dataset \n download link: https://github.com/zhiyongc/Seattle-Loop-Data ')
    parser.add_argument('--epochs', type=int, default = 100, help='maximum number of epochs before stopping training')
    parser.add_argument('--lr', type=float, default = 1e-4, help='starting learn rate' )
    parser.add_argument('--patience', type=int, default = 10, help='number of consecutive non-improving validation loss epochs before stop training')
    parser.add_argument('--sample-perc', type=int, default = 50, help='percentage of in-sample nodes')
    parser.add_argument('--F-perc', type=int, default = None, help='percentage of frequencies to keep in frequency set \mathcal{F}')
    parser.add_argument('--S-perc', type=int, default = 50, help='percentage of samples')
    parser.add_argument('--e-opt', action='store_true',help='if sampling is performed by E-optmal greedy algorithm')
    parser.add_argument('--sample-seed',type=int,default=1, help='number of run with uniformely random samples. Only used if --e-opt is False')
    parser.add_argument('--seq-len', type=int,default=10, help='history length')
    parser.add_argument('--pred-len', type=int,default=1, help='prediction horizon')
    parser.add_argument('--save-name', type=str, default='sggru_S50_F53_opt_pred1', help='name of file')
    parser.add_argument('--supervised', action='store_true', help='if training is supervised or semi-supervised. Deafault is semi-supervised')
    parser.add_argument('--device', type=str, default='cuda', help='devices: cuda or cpu')
    args = parser.parse_args()
    training_routine(args)
/pytorch_gsp/models/sggru.py
"""SG-GRU model layers: joint forecasting in vertex and spectral domains."""
import torch.utils.data as utils
import torch.nn.functional as F
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import numpy as np
import pandas as pd
import time

from pytorch_gsp.utils.gsp import (spectral_components, Reconstruction)


class SpectralGraphForecast(nn.Module):
    """
    SpectralGraphForecast
    Args:
        V (numpy array): eigenvector matrix of the graph signal processing
            model (i.e.: Laplacian matrix of the graph)
        sample (numpy array): indices of in sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        rnn (str, optional): predictive model: lstm, gru, 1dconv. Defaults to 'gru'.
    """
    def __init__(self, V, sample, freqs, rnn = 'gru'):
        super(SpectralGraphForecast, self).__init__()
        self.N = V.shape[0]  # number of nodes in the entire graph
        self.d = len(freqs)  # number of frequencies
        self.n = len(sample)  # number of samples
        self.sample = sample
        if rnn == 'gru':
            self.srnn = nn.GRU(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.GRU(self.n, self.n, 1, batch_first=True)
        elif rnn == 'lstm':
            self.srnn = nn.LSTM(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.LSTM(self.n, self.n, 1, batch_first=True)
        elif rnn == '1dconv':
            # Fix: nn.Conv1d has no ``batch_first`` keyword — passing it raised
            # a TypeError, making this branch unusable.
            # NOTE(review): forward() indexes the module output RNN-style
            # (``[0][:, -1, :]``), which does not match Conv1d outputs —
            # confirm before relying on the '1dconv' option.
            self.srnn = nn.Conv1d(self.d, self.d, 1)
            self.rnn = nn.Conv1d(self.n, self.n, 1)
        if self.n != self.N:
            self.interpolate = Reconstruction(V, sample, freqs, domain='spectral')
            self.interpolate2 = Reconstruction(V, sample, freqs, domain='vertex')
        self.linear = nn.Linear(self.N * 2, self.N)

    def forward(self, input):
        # The first ``n`` features are vertex-domain samples; the remaining
        # ``d`` are the GFT coefficients appended by the data pipeline.
        x = input[:, :, :self.n]
        x_hat = input[:, :, self.n:]
        bz, seq_len, _ = x.size()
        x_hat = self.srnn(x_hat)[0][:, -1, :]
        if self.n != self.N:
            xtilde = self.interpolate(x_hat).unsqueeze(1)
        else:
            xtilde = x_hat.unsqueeze(1)
        x = self.rnn(x)[0][:, -1, :]
        if self.n != self.N:
            x1 = self.interpolate2(x)
            x1[:, self.sample] = x
        else:
            x1 = x
        x1 = x1.unsqueeze(1)
        # Fuse the spectral-domain and vertex-domain predictions linearly.
        x1 = torch.cat((xtilde, x1), dim=1).reshape((bz, self.N * 2))
        return self.linear(x1)


class SpectralGraphForecast2(nn.Module):
    """
    SpectralGraphForecast2: combination of predictive models in both spectral
    and vertex domains
    Args:
        V (numpy array): eigenvector matrix of the graph signal processing
            model (i.e.: Laplacian matrix of the graph)
        sample (numpy array): indices of in sample nodes
        freqs (numpy array): frequency components to be used in interpolation
        rnn (str, optional): predictive model: lstm, gru. Defaults to 'gru'.
    """
    def __init__(self, V, sample, freqs, rnn = 'gru'):
        super(SpectralGraphForecast2, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.sample = sample
        if rnn == 'gru':
            self.srnn = nn.GRU(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.GRU(self.n, self.n, 1, batch_first=True)
        elif rnn == 'lstm':
            self.srnn = nn.LSTM(self.d, self.d, 1, batch_first=True)
            self.rnn = nn.LSTM(self.n, self.n, 1, batch_first=True)
        if self.n != self.N:
            # Fix: 'sprctral' typo — Reconstruction asserts the domain is one
            # of ['vertex', 'spectral'], so the misspelling always raised
            # AssertionError.
            self.interpolate = Reconstruction(V, sample, freqs, domain='spectral')
            self.interpolate2 = Reconstruction(V, sample, freqs, domain='vertex')
        # Learned per-node mixing weight between the two domain predictions.
        self.w = Parameter(torch.Tensor(self.N), requires_grad=True)
        self.w.data.fill_(0.01)

    def forward(self, input):
        x = input[:, :, :self.n]
        x_hat = input[:, :, self.n:]
        bz, seq_len, _ = x.size()
        x_hat = self.srnn(x_hat)[0][:, -1, :]
        if self.n != self.N:
            xtilde = self.interpolate(x_hat)
        else:
            xtilde = x_hat
        x = self.rnn(x)[0][:, -1, :]
        if self.n != self.N:
            x1 = self.interpolate2(x)
        else:
            # Fix: ``x1`` was undefined when the sample covers the whole
            # graph, causing a NameError on the return below.
            x1 = x
        # Convex-like per-node combination of spectral and vertex predictions.
        return torch.tanh(self.w) * xtilde + (1 - torch.tanh(self.w)) * x1


class model(nn.Module):
    def __init__(self, V, sample, freqs, layer, supervised = True, l1=0, l2=0, schedule_step=10):
        """ model: model class to use the SpectralGraphForecast layer
        Args:
            V (numpy array): eigenvector matrix of the graph signal processing
                model (i.e.: Laplacian matrix of the graph)
            sample (numpy array): indices of in sample nodes
            freqs (numpy array): frequency components to be used in interpolation
            layer (nn.Module): SpectralGraphForecast layer
            supervised (bool, optional): if False, out-of-sample targets are
                obtained by interpolating the sampled labels. Defaults to True.
            l1/l2 (float, optional): weights of the L1/L2 regularisers on the
                sampled nodes. Defaults to 0.
            schedule_step (int, optional): unused here; kept for interface
                compatibility with model2.
        """
        super(model, self).__init__()
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.supervised = supervised
        self.sample = sample
        self.layer = layer
        self.l1 = l1
        self.l2 = l2
        self.schedule_step = schedule_step
        if not supervised:
            self.interpolate = Reconstruction(V, sample, freqs, domain='vertex')

    def forward(self, input):
        return self.layer(input)

    def loss(self, out, y):
        """MSE on (possibly interpolated) targets plus optional L1/L2 terms
        restricted to the sampled nodes."""
        assert (self.l1 + self.l2 <= 1)
        assert(self.l1 >= 0)
        assert(self.l2 >= 0)
        regularization_loss = 0
        if self.l1 != 0:
            regularization_loss += self.l1 * torch.nn.L1Loss()(y[:, self.sample], out[:, self.sample])
        if self.l2 != 0:
            regularization_loss += self.l2 * torch.norm(y[:, self.sample] - out[:, self.sample])
        if not self.supervised:
            # Semi-supervised: extend the sampled labels to the full vertex
            # set by interpolation, keeping the observed entries exact.
            ys = y
            y = self.interpolate(ys)
            y[:, self.sample] = ys
        return torch.nn.MSELoss()(y, out) + regularization_loss

    def schedule(self, opt):
        """Halve the learning rate every 10 epochs while it stays above 1e-5."""
        for param_group in opt.param_groups:
            learning_rate = param_group['lr']
        if learning_rate > 1e-5:
            lamb = lambda epoch: 0.5 if epoch % 10 == 0 else 1
        else:
            lamb = lambda epoch: 1 if epoch % 10 == 0 else 1
        return torch.optim.lr_scheduler.MultiplicativeLR(opt, lr_lambda=[lamb])


class model2(nn.Module):
    def __init__(self, V, sample, freqs, layer, l1=0, l2=0, schedule_step=10, supervised = True, unsqueeze=False):
        super(model2, self).__init__()
        """
        model2: interpolates the signal before running the layer.
        Args:
            V (numpy array): eigenvector matrix of the graph signal processing
                model (i.e.: Laplacian matrix of the graph)
            sample (numpy array): indices of in sample nodes
            freqs (numpy array): frequency components to be used in interpolation
            layer (nn.Module): layer
        """
        self.N = V.shape[0]
        self.d = len(freqs)
        self.n = len(sample)
        self.supervised = supervised
        self.sample = sample
        self.unsqueeze = unsqueeze
        self.layer = layer
        self.l1 = l1
        self.l2 = l2
        self.schedule_step = schedule_step
        self.interpolate2 = Reconstruction(V, sample, freqs, domain='vertex')
        if not supervised:
            self.interpolate = Reconstruction(V, sample, freqs, domain='vertex')
        self.linear = torch.nn.Linear(self.N, self.N)

    def forward(self, input):
        bz, seq_len, N = input.size()
        x = input
        if self.unsqueeze:
            # Fix: the unsqueezed tensor was previously discarded because the
            # layer was called on ``input`` instead of the unsqueezed ``x``.
            x = input.unsqueeze(dim=1)
        x = self.layer(x)
        if N < self.N:
            x1 = self.interpolate2(x)
            x1[:, self.sample] = x
        else:
            x1 = x
        return x1

    def loss(self, out, y):
        """Same loss as ``model.loss`` (see above)."""
        assert (self.l1 + self.l2 < 1)
        assert(self.l1 >= 0)
        assert(self.l2 >= 0)
        regularization_loss = 0
        if self.l1 != 0:
            regularization_loss += self.l1 * torch.nn.L1Loss()(y[:, self.sample], out[:, self.sample])
        if self.l2 != 0:
            regularization_loss += self.l2 * torch.norm(y[:, self.sample] - out[:, self.sample])
        if not self.supervised:
            ys = y
            y = self.interpolate(ys)
            y[:, self.sample] = ys
        return torch.nn.MSELoss()(y, out) + regularization_loss

    def schedule(self, opt):
        """Halve the learning rate every ``schedule_step`` epochs while it
        stays above 1e-5."""
        for param_group in opt.param_groups:
            learning_rate = param_group['lr']
        if learning_rate > 1e-5:
            lamb = lambda epoch: 1/2 if epoch % self.schedule_step == 0 else 1
        else:
            lamb = lambda epoch: 1 if epoch % 5 == 0 else 1
        return torch.optim.lr_scheduler.MultiplicativeLR(opt, lr_lambda=[lamb])
/pytorch_gsp/train/train_rnn.py
### training code ####
import sys
import time
import numpy as np
import torch
from torch.autograd import Variable

# Width (in characters) of the textual progress bar printed per epoch.
toolbar_width=20


def Train(model, train_dataloader, valid_dataloader, learning_rate = 1e-5, epochs = 300, patience = 10, verbose=1, gpu = True, sample = None, optimizer = 'rmsprop'):
    """Train ``model`` with early stopping on the validation loss.

    The model must provide ``loss(out, y)`` and ``schedule(optimizer)``
    methods (see pytorch_gsp.models.sggru). Batches smaller than the
    dataloader's batch size are skipped.

    NOTE(review): ``best_model = model`` stores an *alias*, not a copy — the
    returned "best" model is whatever state the model had when training
    stopped; confirm whether a deepcopy was intended. ``sample`` is unused.

    Returns:
        (best_model, [train losses, valid losses, epoch times, valid times]),
        or (None, None) if the validation loss diverges.
    """
    if optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate)
    elif optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate )
    loss_MSE = torch.nn.MSELoss()
    loss_L1 = torch.nn.L1Loss()
    batch_size = train_dataloader.batch_size
    if gpu:
        device='cuda'
    else:
        device= 'cpu'
    losses_epochs_train = []
    losses_epochs_valid = []
    time_epochs = []
    time_epochs_val = []
    is_best_model = 0
    patient_epoch = 0
    scheduler = model.schedule(optimizer)
    for epoch in range(epochs):
        pre_time = time.time()
        # Best-effort detection of the dataset size for the progress bar;
        # different dataset types expose it differently.
        try:
            data_size=train_dataloader.dataset.data_size
        except:
            pass
        try:
            data_size=train_dataloader.dataset.tensors[0].shape[0]
        except:
            pass
        n_iter=data_size/train_dataloader.batch_size
        if verbose:
            count=0
            checkpoints=np.linspace(0,n_iter,toolbar_width).astype(np.int16)
            text='Epoch {:02d}: '.format(epoch)
            sys.stdout.write(text+"[%s]" % (" " * toolbar_width))
            sys.stdout.flush()
            sys.stdout.write("\b" * (toolbar_width+1))
        losses_train = []
        losses_valid = []
        for data in train_dataloader:
            inputs, labels = data
            # drop_last notwithstanding, skip any ragged batch
            if inputs.shape[0] != batch_size:
                continue
            model.zero_grad()
            outputs = model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            loss_train = model.loss(outputs,y)
            losses_train.append(loss_train.cpu().data.numpy())
            optimizer.zero_grad()
            loss_train.backward()
            optimizer.step()
            if verbose:
                if count in checkpoints:
                    sys.stdout.write('=')
                    sys.stdout.flush()
                count+=1
        # Read back the current learning rate; the scheduler only steps while
        # the rate is above the 1e-5 floor.
        for param_group in optimizer.param_groups:
            learning_rate = param_group['lr']
        if learning_rate >1e-5:
            scheduler.step()
        time_epochs.append(time.time()-pre_time)
        pre_time = time.time()
        losses_valid = []
        for data in valid_dataloader:
            inputs, labels = data
            if inputs.shape[0] != batch_size:
                continue
            outputs= model(inputs.to(device))
            outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
            losses_valid.append(model.loss(outputs, y).cpu().data.numpy())
        time_epochs_val.append(time.time()-pre_time)
        losses_epochs_train.append(np.mean(losses_train))
        losses_epochs_valid.append(np.mean(losses_valid))
        avg_losses_epoch_train = losses_epochs_train[-1]
        avg_losses_epoch_valid = losses_epochs_valid[-1]
        # Abort on numerical divergence.
        if avg_losses_epoch_valid >100000000000:
            print("Diverged")
            return (None,None)
        if epoch == 0:
            is_best_model = True
            best_model = model
            min_loss = avg_losses_epoch_valid
        else:
            # Count an epoch as an improvement only beyond a 1e-6 margin.
            if min_loss - avg_losses_epoch_valid > 1e-6:
                is_best_model = True
                best_model = model
                min_loss = avg_losses_epoch_valid
                patient_epoch = 0
            else:
                is_best_model = False
                patient_epoch += 1
        if patient_epoch >= patience:
            print('Early Stopped at Epoch:', epoch)
            break
        if verbose:
            sys.stdout.write("]")
            print(' train loss: {}, valid loss: {}, time: {}, lr: {}'.format( \
                np.around(avg_losses_epoch_train, 6),\
                np.around(avg_losses_epoch_valid, 6),\
                np.around([time_epochs[-1] ] , 2),\
                learning_rate) )
    return best_model, [losses_epochs_train , losses_epochs_valid , time_epochs , time_epochs_val ]


def Evaluate(model, dataloader, scale=1, pred_len = 1, gpu = True):
    """Evaluate ``model`` on ``dataloader`` and print MAE / RMSE / MAPE.

    Predictions and targets are rescaled by ``scale`` (the normalisation
    factor from SplitData). MAPE is computed only where |target| > 1 to
    avoid division blow-ups near zero.

    NOTE(review): the ``gpu`` parameter is immediately overwritten by
    ``torch.cuda.is_available()``; ``pred_len`` is unused.

    Returns:
        [per-batch L1 losses, per-batch MSE losses, mean MAE, mean MAPE,
         elapsed seconds].
    """
    batch_size = dataloader.batch_size
    pre_time = time.time()
    gpu = torch.cuda.is_available()
    if gpu:
        device='cuda'
    else:
        device= 'cpu'
    losses_mse = []
    losses_l1 = []
    losses_mape = []
    for i,data in enumerate(dataloader):
        inputs, labels = data
        if inputs.shape[0] != batch_size:
            continue
        outputs = model(inputs.to(device))
        outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
        loss_mse = torch.nn.MSELoss()(outputs*scale, y*scale).cpu().data
        loss_l1 = torch.nn.L1Loss()(outputs*scale, y*scale).cpu().data
        outputs = outputs.cpu().data.numpy()
        y = y.cpu().data.numpy()
        outputs = outputs*scale
        y = y*scale
        abs_diff = np.abs((outputs-y))
        abs_y = np.abs(y)
        # Restrict MAPE to entries with |target| > 1.
        abs_diff=abs_diff[abs_y>1]
        abs_y=abs_y[abs_y>1]
        loss_mape = abs_diff/abs_y
        loss_mape = np.mean(loss_mape)*100
        losses_mse.append(loss_mse)
        losses_l1.append(loss_l1)
        losses_mape.append(loss_mape)
    losses_l1 = np.array(losses_l1)
    losses_mse = np.array(losses_mse)
    mean_l1 = np.mean(losses_l1, axis = 0)
    rmse = np.mean(np.sqrt(losses_mse))
    print('Test: MAE: {}, RMSE : {}, MAPE : {}'.format(mean_l1, rmse,np.mean(losses_mape)))
    return [losses_l1, losses_mse, mean_l1, np.mean(losses_mape), time.time()-pre_time]

### modified from https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb
/pytorch_gsp/utils/gsp.py
"""Graph signal processing utilities: sampling, interpolation and spectral
analysis of graph signals."""
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
# Fix: ``scipy.linalg`` is used below; a bare ``import scipy`` does not
# guarantee the submodule is loaded.
import scipy.linalg
from sklearn.metrics.pairwise import rbf_kernel


def complement(S, N):
    """Return the indices of {0, ..., N-1} that are not in ``S``."""
    V = set(np.arange(0, N, 1))
    return np.array(list(V - set(S)))


class Reconstruction(nn.Module):
    def __init__(self, V, sample, freqs, domain='vertex', use_original_set = False, device = 'cuda'):
        """ GSP reconstruction of Graph signals
        Args:
            V (numpy array): eigenvector matrix of Laplacian or adjacency.
                This matrix is expected to be orthonormal.
            sample (list-like): list of indices of in-sample nodes
            freqs (list): number or list of indices of frequencies
            domain (str, optional): domain of the input graph signal. Options
                are 'vertex' or 'spectral'. Defaults to 'vertex'.
            use_original_set (bool, optional): keep a reference to ``sample``
                on the module. Defaults to False.
        """
        super(Reconstruction, self).__init__()
        assert(domain in ['vertex', 'spectral'])
        if domain == 'vertex':
            interp = Interpolator(V, sample, freqs)
        elif domain == 'spectral':
            interp = Interpolator(V, sample, freqs, freq=True)
        # NOTE(review): Interpolator returns None for inadmissible samplings,
        # which would make this Tensor conversion fail — confirm callers
        # always use admissible samples.
        self.Interp = torch.Tensor(interp).to(device)
        self.N = V.shape[0]
        if use_original_set:
            self.sample = sample
        else:
            self.sample = None

    def forward(self, x):
        """Apply the precomputed interpolation operator to a batch of signals
        (supports (batch, nodes) and (batch, seq, nodes) inputs)."""
        x0 = x
        n_dim = len(x.size())
        if n_dim == 3:
            bz, seq_len, n = x.size()
            x = x.T
            x = x.reshape((n, bz * seq_len))
            x = torch.matmul(self.Interp, x)
            x = x.reshape((self.N, seq_len, bz)).T
        else:
            bz, n = x.size()
            x = x.T
            x = x.reshape((n, bz))
            x = torch.matmul(self.Interp, x)
            x = x.reshape((self.N, bz)).T
        return x


def corrMatrix(A, x):
    """ corrMatrix compute an adjacency matrix with radial basis function entries
    Args:
        A (2D numpy array): adjacency matrix
        x (2D numpy array): signals to be used to compute correlations
    Returns:
        2D numpy array: adjacency matrix
    """
    cor = rbf_kernel(x.T / 10)
    A = cor * (A)
    # Normalise by the largest eigenvalue and drop self-loops.
    e, _ = np.linalg.eigh(A)
    A /= np.max(e)
    return A - np.diag(A.diagonal())


def spectral_components(A, x, return_vectors = True, lap = True, norm = False):
    """ spectral_components: compute the index of spectral components with
    largest magnitude in a set of graph signals
    Args:
        A (2d numpy array): adjacency matrix
        x (2d numpy array): graph signals with time in the rows and nodes in
            the columns
        return_vectors (bool, optional): also return the eigenvector matrix.
            Defaults to True.
        lap (bool, optional): if the spectral components are computed using
            the laplacian. Defaults to True.
        norm (bool, optional): if the matrix should be normalized as
            $D^{-1/2}AD^{-1/2}$.
    Returns:
        F_global (and V when return_vectors): frequency indices ranked by
        aggregate energy across the signals, and the eigenvector matrix.
    """
    if lap:
        if norm:
            d = 1 / np.sqrt(A.sum(axis=1))
            D = np.diag(d)
            I = np.diag(np.ones(A.shape[0]))
            L = I - D @ A @ D
        else:
            D = np.diag(A.sum(axis=1))
            L = D - A
    else:
        if norm:
            d = 1 / np.sqrt(A.sum(axis=1))
            D = np.diag(d)
            I = np.diag(np.ones(A.shape[0]))
            L = D @ A @ D
        else:
            L = A
    lambdas, V = np.linalg.eigh(L)
    # Rank frequencies by their energy rank summed over all signals.
    energy = np.abs(V.T @ x.T).T
    index = []
    for y in energy:
        index.append(list(np.argsort(y)))
    ocorrencias = {i: 0 for i in range(x.shape[1])}
    for y in index:
        for i in y:
            ocorrencias[i] += y.index(i)
    F_global = np.argsort([ocorrencias[oc] for oc in ocorrencias])[::-1]
    if return_vectors:
        return F_global, V
    else:
        return F_global


def Interpolator(V, sample, freqs, freq = False):
    """Build the linear interpolation operator from sampled vertex values (or
    spectral coefficients when ``freq=True``) to the full vertex set.

    Returns None (after printing a warning) if the sampling is inadmissible,
    i.e. the sampled eigenvector submatrix loses rank.
    """
    Vf = V[:, freqs]
    Psi = np.zeros(Vf.shape[0])
    Psi[sample] = 1  # transpose of the sampling operator \Psi
    Psi = np.diag(Psi)
    I = np.identity(Vf.shape[0])
    inv = scipy.linalg.inv(Vf.T @ Psi @ Vf)
    if freq == False:
        pseudoi = inv @ Vf.T @ Psi[:, sample]
    else:
        pseudoi = inv
    interp = np.dot(Vf, pseudoi)
    Psi_bar = I - Psi
    s = np.linalg.svd(np.dot(Psi_bar, Vf), compute_uv=False)
    if np.max(s) > 1:
        # Fix: message previously read "Samling is not admissable".
        print("Sampling is not admissible")
        return None
    return interp


class KNN(nn.Module):
    def __init__(self, A, sample, matrix):
        """Baseline that fills out-of-sample nodes with the mean of their
        graph neighbours (unknown entries first set to a global mean)."""
        super(KNN, self).__init__()
        N = A.shape[0]
        # Fix: forward() previously referenced the *global* name ``A``, which
        # raised a NameError — the adjacency must be stored on the module.
        self.A = A
        self.unknown = complement(sample, N)
        self.mask = np.mean(matrix.values[:, sample])

    def forward(self, input):
        if len(input.size()) == 2:
            input[:, self.unknown] = self.mask
        elif len(input.size()) == 3:
            input[:, :, self.unknown] = self.mask
        elif len(input.size()) == 4:
            input[:, :, :, self.unknown] = self.mask
        x = input
        for node in self.unknown:
            neighbors = np.nonzero(self.A[node])[0]
            x[:, :, [node]] = torch.mean(x[:, :, neighbors], dim=-1)
        return x


def greedy_e_opt(Uf, S):
    """
    code from https://github.com/georgosgeorgos/GraphSignalProcessing, please
    refer to this repository

    MIT License
    Copyright (c) 2018 Giorgio Giannone
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    greedy_e_opt: sample S nodes from a set of size N where N is the number
    of rows in matrix Uf
    Args:
        Uf (2D numpy array): truncated eigenvector matrix with N rows.
            Columns correspond to the selected eigenvectors
        S (int): sample size
    Returns:
        sample: list of indices of selected nodes
    """
    index_set = set()
    sample = []
    n = Uf.shape[0] - 1
    k = 0
    I = np.diag(np.ones(Uf.shape[0]))
    while len(index_set) < S:
        i = -1
        i_best = -1
        old_list = []
        sigma_best = np.inf
        while i < n:
            i = i + 1
            if i in index_set:
                continue
            else:
                # Greedily pick the node minimising the largest singular
                # value of the complement-sampled eigenvector matrix.
                Ds_list = np.zeros(Uf.shape[0])
                ix = sample + [i]
                Ds_list[ix] = 1
                Ds = np.diag(Ds_list)
                Ds_bar = I - Ds
                DU = np.dot(Ds_bar, Uf)
                s = np.linalg.svd(DU, compute_uv=False)
                sigma_max = max(s)
                if sigma_max < sigma_best and sigma_max != -np.inf:
                    sigma_best = sigma_max
                    i_best = i
        k = k + 1
        index_set.add(i_best)
        sample.append(i_best)
    return sample
/setup.py
"""Package metadata for the joint forecasting/interpolation code release."""
from setuptools import setup, find_packages

# Runtime dependencies (version floors as originally pinned).
_REQUIREMENTS = [
    'scipy>=1.4.1',
    'pandas>=0.15',
    'scikit-learn>=0.22',
    'numpy>=0.46',
]

setup(
    name='Joint-Forecasting-and-Interpolation-of-Graph-Signals-Using-Deep-Learning',
    version='0.1.0',
    author='Gabriela Lewenfus',
    author_email='gabriela.lewenfus@gmail.com',
    packages=find_packages(),
    install_requires=_REQUIREMENTS,
    description='Code from the paper Joint Forecasting and Interpolation of Graph Signals Using Deep Learning',
)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
AntLouiz/DatapathWay
refs/heads/master
{"/core.py": ["/control.py", "/logic.py", "/memory.py"], "/instructions.py": ["/utils.py"], "/logic.py": ["/utils.py"], "/memory.py": ["/utils.py"], "/control.py": ["/utils.py"], "/main.py": ["/core.py"]}
└── ├── control.py ├── core.py ├── instructions.py ├── li.py ├── logic.py ├── main.py ├── memory.py └── utils.py
/control.py
import abc

from utils import to_integer, to_decimalC2


class BaseControl(abc.ABC):
    """Base class for instruction controllers.

    A controller reads the instruction currently held by the CPU's program
    counter, drives the ALU / register bank / memory, and prints a trace of
    every datapath step.
    """

    def __init__(self, cpu):
        self.cpu = cpu

    @abc.abstractmethod
    def execute(self):
        """Execute the current instruction on the attached CPU."""
        pass

    def _execute_rtype(self, alu_op):
        """Shared datapath trace for R-type instructions (add/sub/and/or).

        ``alu_op`` is the two-operand ALU method to apply.  Everything else
        (register reads, write-back of the result to ``rd``, trace output)
        was duplicated verbatim across the four R-type controllers; the
        printed trace here is byte-identical to the original.
        """
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        print(instruction)
        rd = registers['rd']
        rs = registers['rs']
        print("Read the register 1: {}{}[{}]".format(rs, ' '*25, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2: {}{}[{}]".format(rt, ' '*25, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        alu_result = alu_op(register_data1, register_data2)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        self.cpu.registers.set_value(rd, alu_result)
        print("Write data: {}".format(alu_result))
        print("Write register: {}{}[{}]".format(rd, ' '*30, to_integer(rd)))
        print("{}".format("-" * 64))
        print("\n\n")


class ControlAdd(BaseControl):
    """Controller for the ``add`` instruction."""

    def execute(self):
        self._execute_rtype(self.cpu.alu.makeSum)


class ControlSub(BaseControl):
    """Controller for the ``sub`` instruction."""

    def execute(self):
        self._execute_rtype(self.cpu.alu.makeSub)


class ControlAnd(BaseControl):
    """Controller for the ``and`` instruction."""

    def execute(self):
        self._execute_rtype(self.cpu.alu.makeAnd)


class ControlOr(BaseControl):
    """Controller for the ``or`` instruction."""

    def execute(self):
        self._execute_rtype(self.cpu.alu.makeOr)


class ControlLw(BaseControl):
    """Controller for ``lw``: address = base register + offset; memory -> rt."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        offset = instruction.get_offset()
        print(instruction)
        rt = registers['rt']
        rs = registers['rs']
        print("Read the register 1:{}{}{}[{}]".format(' '*20, rs, ' '*6, to_integer(rs)))
        register_data = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data))
        print("ALU-in-1: {}{}[{}]".format(register_data, ' '*6, to_decimalC2(register_data)))
        print("ALU-in-2: {}{}[{}]".format(offset, ' '*6, to_decimalC2(offset)))
        alu_result = self.cpu.alu.makeSum(register_data, offset)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        print("Address: {}".format(alu_result))
        memory_data = self.cpu.memory.get_value(alu_result)
        print("Read data: {}".format(memory_data))
        self.cpu.registers.set_value(rt, memory_data)
        print("Write data: {}{}[{}]".format(memory_data, ' '*6, to_decimalC2(memory_data)))
        print("Write register:{}{}{}[{}]".format(' '*25, rt, ' '*6, to_integer(rt)))
        print("{}".format("-" * 64))
        print("\n\n")


class ControlSw(BaseControl):
    """Controller for ``sw``: address = base register + offset; rt -> memory."""

    def execute(self):
        instruction = self.cpu.pc.next_instruction
        registers = instruction.get_registers()
        offset = instruction.get_offset()
        print(instruction)
        rs = registers['rs']
        print("Read the register 1:{}{}{}[{}]".format(' '*20, rs, ' '*6, to_integer(rs)))
        rt = registers['rt']
        print("Read the register 2:{}{}{}[{}]".format(' '*20, rt, ' '*6, to_integer(rt)))
        register_data1 = self.cpu.registers.get_value(rs)
        print("Read data 1: {}".format(register_data1))
        register_data2 = self.cpu.registers.get_value(rt)
        print("Read data 2: {}".format(register_data2))
        print("ALU-in-1: {}{}[{}]".format(register_data1, ' '*6, to_decimalC2(register_data1)))
        print("ALU-in-2: {}{}[{}]".format(offset, ' '*6, to_decimalC2(offset)))
        alu_result = self.cpu.alu.makeSum(register_data1, offset)
        print("ALU-result: {}{}[{}]".format(alu_result, ' '*6, to_decimalC2(alu_result)))
        print("Address: {}".format(alu_result))
        self.cpu.memory.set_value(alu_result, register_data2)
        print("Write data: {}{}[{}]".format(register_data2, ' '*6, to_decimalC2(register_data2)))
        print("{}".format("-" * 64))
        print("\n\n")
/core.py
from memory import RegistersBank, Memory
from logic import ALU
from instructions import PC
from control import (
    ControlSw,
    ControlLw,
    ControlAdd,
    ControlSub,
    ControlAnd,
    ControlOr,
)


class CPU:
    """Wires the datapath components together and dispatches instructions."""

    def __init__(self):
        # Datapath components.
        self.alu = ALU()
        self.pc = PC()
        self.registers = RegistersBank()
        self.memory = Memory()
        # Mnemonic -> controller dispatch table.
        self.control_types = {
            'add': ControlAdd(self),
            'sub': ControlSub(self),
            'and': ControlAnd(self),
            'or': ControlOr(self),
            'lw': ControlLw(self),
            'sw': ControlSw(self),
        }

    def execute(self):
        """Run every instruction supplied by the program counter, in order."""
        for current in self.pc.get_instructions():
            mnemonic = current.get_func()
            self.control_types[mnemonic].execute()
/instructions.py
from li import FUNCTIONS
from utils import extend_to_bits


class MipsInstruction:
    """Decoded 32-bit MIPS instruction (R-type or I-type).

    The raw instruction is a string of 32 '0'/'1' characters; a trailing
    newline (as read from the instructions file) is tolerated and stripped.
    """

    op = None
    rs = None
    rt = None
    rd = None
    shamt = None
    func = None
    offset = None
    instruction_type = None
    instruction = None

    def __init__(self, instruction):
        # BUG FIX: the original check was
        #   if not (isinstance(instruction, str) or len(instruction) == 32)
        # -- with ``or`` any string of any length passed, and any 32-element
        # non-string passed too.  Validate the type first, then the length
        # after stripping the newline.
        if not isinstance(instruction, str):
            raise Exception()
        self.instruction = instruction.replace('\n', '')
        if len(self.instruction) != 32:
            raise Exception()
        self.op = self.instruction[:6]
        if self.op == '000000':
            self._configure_to_registers()
        else:
            self._configure_to_imediate()

    def _configure_to_imediate(self):
        # I-type layout: op(6) rs(5) rt(5) offset(16).
        self.instruction_type = 'I'
        self.rs = self.instruction[6:11]
        self.rt = self.instruction[11:16]
        self.offset = self.instruction[16:32]
        return self.instruction

    def _configure_to_registers(self):
        # R-type layout: op(6) rs(5) rt(5) rd(5) shamt(5) funct(6).
        self.instruction_type = 'R'
        self.rs = self.instruction[6:11]
        self.rt = self.instruction[11:16]
        self.rd = self.instruction[16:21]
        self.shamt = self.instruction[21:26]
        self.func = self.instruction[26:32]
        return self.instruction

    def has_offset(self):
        """True for I-type instructions (which carry a 16-bit immediate)."""
        if self.instruction_type == 'R':
            return False
        return True

    def get_type(self):
        return self.instruction_type

    def get_function(self):
        return self.func

    def get_registers(self):
        """Return the register fields as a dict (rd is None for I-type)."""
        registers = {
            'rs': self.rs,
            'rt': self.rt,
            'rd': self.rd
        }
        return registers

    def get_offset(self):
        """Return the 16-bit immediate widened to 32 bits, or None for R-type.

        BUG FIX: the original zero-extended via ``extend_to_bits``, which
        corrupts negative two's-complement offsets; the MIPS ISA
        sign-extends the lw/sw immediate.  ``rjust`` pads on the left with
        the sign bit, which is identical to the old behaviour for
        non-negative offsets.
        """
        if not self.has_offset():
            return None
        return self.offset.rjust(32, self.offset[0])

    def __repr__(self):
        representation = "-" * 64
        representation += \
            "\nInstruction: {}\nType: {}\nOperation: {}\n".format(
                self.instruction,
                self.instruction_type,
                self.get_func()
            )
        representation += "-" * 64
        return representation

    def get_func(self):
        """Return the mnemonic: looked up by opcode (I-type) or funct (R-type)."""
        if self.op != '000000':
            return FUNCTIONS[self.op]
        return FUNCTIONS[self.func]


class PC:
    """Program counter: streams MipsInstruction objects from a text file."""

    def __init__(self, filename="instructions_file.txt"):
        # NOTE(review): the handle is never closed; acceptable for the
        # lifetime of this toy simulator, but worth confirming.
        self.file = open(filename, 'r')
        self.next_instruction = None

    def get_instructions(self):
        """Yield a MipsInstruction for each line of the file.

        ``next_instruction`` always holds the most recently yielded
        instruction (the original had an if/else whose two branches were
        identical -- collapsed here).
        """
        for line in self.file.readlines():
            self.next_instruction = MipsInstruction(line)
            yield self.next_instruction
/li.py
# Instruction mnemonics recognized by the simulator, keyed by the 6-bit
# opcode (I-type) or funct field (R-type).
FUNCTIONS = {
    # I-type opcodes
    '100011': 'lw',
    '101011': 'sw',
    # R-type funct codes
    '100000': 'add',
    '100010': 'sub',
    '100100': 'and',
    '100101': 'or',
}
/logic.py
from utils import (
    extend_to_bits,
    to_binary,
    to_integer,
    to_binaryC2,
    to_decimalC2
)


class ALU:
    """Arithmetic/logic unit operating on 32-bit binary strings."""

    @staticmethod
    def _wrap32(value):
        """Wrap an integer into the signed 32-bit two's-complement range.

        BUG FIX: on overflow the original passed the out-of-range integer
        straight to ``to_binaryC2``, which produced a 33-bit string because
        ``extend_to_bits`` never truncates.  Wrapping first keeps every
        result exactly 32 bits wide; in-range values are unchanged.
        """
        value &= 0xFFFFFFFF
        if value >= 2**31:
            value -= 2**32
        return value

    def makeSum(self, a, b):
        """Two's-complement addition of two 32-bit binary strings."""
        result = to_decimalC2(a) + to_decimalC2(b)
        if result > (2**31 - 1) or result < -(2**31):
            print("{}OVERFLOW OCURRENCE{}".format("-" * 20, "-" * 7))
            result = self._wrap32(result)
        return to_binaryC2(result)

    def makeSub(self, a, b):
        """Two's-complement subtraction (a - b) of 32-bit binary strings."""
        result = to_decimalC2(a) - to_decimalC2(b)
        if result > (2**31 - 1) or result < -(2**31):
            print("{}OVERFLOW OCURRENCE".format("-" * 26))
            result = self._wrap32(result)
        return to_binaryC2(result)

    def makeAnd(self, a, b):
        """Bitwise AND of two binary strings, zero-extended to 32 bits."""
        a = int(a, 2)
        b = int(b, 2)
        result = to_binary((a & b))
        return extend_to_bits(result)

    def makeOr(self, a, b):
        """Bitwise OR of two binary strings, zero-extended to 32 bits."""
        a = int(a, 2)
        b = int(b, 2)
        result = to_binary((a | b))
        return extend_to_bits(result)

    def makeNot(self, a):
        """Bitwise NOT in two's complement, preserving the input width."""
        a_len = len(a)
        a = to_decimalC2(a)
        result = to_binaryC2(~a, a_len)
        return result
/main.py
from core import CPU

if __name__ == "__main__":
    # Build the datapath and run the loaded instruction stream.
    CPU().execute()
/memory.py
import random

from utils import to_binary, extend_to_bits, to_binaryC2


class BaseMemory:
    """Address -> value mapping backed by a dict."""

    def __init__(self):
        self.data = {}

    def set_value(self, address, value):
        """Store ``value`` at ``address``; always returns True."""
        self.data[address] = value
        return True

    def get_value(self, address):
        """Return the value stored at ``address`` (KeyError when unset)."""
        return self.data[address]


class RegistersBank(BaseMemory):
    """Monostate bank of 32 registers keyed by 5-bit binary index strings."""

    data = {}

    def __new__(cls, *args, **kwargs):
        """Share state across all instances (monostate/Borg pattern)."""
        # BUG FIX: do not forward *args/**kwargs to object.__new__ --
        # in Python 3 that raises TypeError whenever arguments are present.
        obj = super(RegistersBank, cls).__new__(cls)
        obj.__dict__ = cls.data
        return obj

    def __init__(self):
        total_registers = 2**5
        for i in range(total_registers):
            # 5-bit register index (the original hand-rolled this zero fill,
            # duplicating what extend_to_bits already does).
            binary_number = extend_to_bits(to_binary(i), 5)
            if i == 8:
                # Register 8 ($t0 in MIPS convention -- confirm) is
                # pre-loaded with the value 16 for the demo program.
                self.data[binary_number] = extend_to_bits(to_binary(16))
            else:
                # NOTE(review): registers start as False, not as a zero bit
                # string -- verify downstream code tolerates this.
                self.data[binary_number] = False


class Memory(BaseMemory):
    """Monostate 256-word memory initialised with random 32-bit values."""

    data = {}

    def __new__(cls, *args, **kwargs):
        """Share state across all instances (monostate/Borg pattern)."""
        obj = super(Memory, cls).__new__(cls)
        obj.__dict__ = cls.data
        return obj

    def __init__(self):
        total_data = 2**8
        for i in range(total_data):
            # (A dead duplicate ``binary_number = to_binary(i)`` assignment
            # was removed here.)
            binary_number = extend_to_bits(to_binary(i))
            random_number = to_binaryC2(
                random.randint(-(2**31), (2**31) - 1)
            )
            self.data[binary_number] = random_number
/utils.py
def to_integer(binary_number):
    """Convert an unsigned binary string to an int.

    Raises:
        TypeError: if ``binary_number`` is not a string (the original raised
            a bare ``Exception()``; TypeError is a subclass of Exception, so
            existing ``except Exception`` handlers still work).
    """
    if not isinstance(binary_number, str):
        raise TypeError('binary_number must be a string')
    return int(binary_number, 2)


def to_binary(number):
    """Convert a non-negative int to its binary string (no '0b' prefix)."""
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    return "{:0b}".format(number)


def extend_to_bits(binary_number, bits=32):
    """Zero-extend a binary string on the left to ``bits`` characters.

    Returns None for non-string input (kept for backward compatibility with
    existing callers).  Strings already ``bits`` long or longer are returned
    unchanged -- str.zfill pads with '0' and never truncates, which matches
    the original hand-rolled fill exactly.
    """
    if not isinstance(binary_number, str):
        return None
    return binary_number.zfill(bits)


def to_binaryC2(number, bits=32):
    """Encode an int as a ``bits``-wide two's-complement binary string."""
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    if number < 0:
        # Two's complement: represent -x as 2**bits - x.
        number = 2**bits + number
    return extend_to_bits(to_binary(number), bits)


def to_decimalC2(binary_number):
    """Decode a two's-complement binary string to a signed int.

    Returns None for non-string input (backward compatible).
    """
    if not isinstance(binary_number, str):
        return None
    bits = len(binary_number)
    decimal = int(binary_number, 2)
    if binary_number[0] == '0':
        return decimal
    # Leading 1 => negative value in two's complement.
    return decimal - 2**bits
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
zhuliyi10/python_demo
refs/heads/master
{"/models/mymodule_demo.py": ["/models/mymodule.py"]}
└── ├── base.py ├── function │ ├── function.py │ ├── function_key.py │ └── total.py ├── if.py ├── input_output │ ├── pickling.py │ ├── user_input.py │ └── using_file.py └── models ├── mymodule.py ├── mymodule_demo.py └── using_sys.py
/base.py
# Demonstrates positional placeholders with str.format().
name = 'zhuly'
age = 20
print('{0} was {1} years old'.format(name, age))
/function/function.py
def sayHello():
    """Print a fixed greeting to stdout."""
    print('hello world,hello python!')


sayHello()
/function/function_key.py
def func(a, b=5, c=10):
    """Show how positional and keyword arguments fill in defaults."""
    print('a=', a, ' b=', b, ' c=', c)


# Positional, mixed, and keyword-only call styles.
func(2, 7)
func(2, c=23)
func(c=23, a=9)
/function/total.py
def total(a=5, *numbers, **phonebook):
    """Demonstrate *args (tuple) and **kwargs (dict) parameter collection."""
    print('a', a)
    # Walk every extra positional argument (collected into a tuple).
    for item in numbers:
        print('num_item', item)
    # Walk every extra keyword argument (collected into a dict).
    for first, second in phonebook.items():
        print(first, second)


total(10, 1, 2, 3, Name='zhuly', age=26)
/if.py
# Number-guessing loop: keep prompting until the user hits the target.
number = 23

while True:
    guess = int(input('请输入一个整数:'))
    if guess == number:
        print('恭喜,你猜对了。')
        break
    # Tell the player which direction to adjust.
    print('你猜小了' if guess < number else '你猜大了')

print('end')
/input_output/pickling.py
import pickle

# File that will hold the pickled object.
shoplistfile = 'shoplist.data'

# The shopping list to persist.
shoplist = ['苹果', '芒果', '胡萝卜']

# IDIOM FIX: use ``with`` so the handle is closed even on error
# (the original paired manual open()/close() calls).
with open(shoplistfile, 'wb') as f:
    pickle.dump(shoplist, f)

del shoplist  # Drop the in-memory copy to prove the reload works.

# Read the object back from storage.
with open(shoplistfile, 'rb') as f:
    storedlist = pickle.load(f)

print(storedlist)
/input_output/user_input.py
def reverse(text):
    """Return ``text`` backwards (negative-step slice)."""
    return text[::-1]


def is_palindrome(text):
    """True when ``text`` reads the same in both directions."""
    return text == reverse(text)


something = input('输入文本:')
if is_palindrome(something):
    print("是的,这是回文")
else:
    print("这不是回文")
/input_output/using_file.py
poem = '''\
当工作完成时
编程是有趣的
如果想让你的工作有趣
使用Python!
'''

# IDIOM FIX: ``with`` closes the file automatically
# (the original paired manual open()/close() calls).
with open('poem.txt', 'w') as f:
    f.write(poem)

# Read it back line by line; end='' avoids doubling the newlines
# each line already carries.
with open('poem.txt', 'r') as f:
    for line in f:
        print(line, end='')
/models/mymodule.py
def sayhello(): print('hello wolrd,hello python!') __version__='0.1'
/models/mymodule_demo.py
# Demo: import selected names from a sibling module and use them directly.
from mymodule import sayhello, __version__

sayhello()
print('version:', __version__)
/models/using_sys.py
import sys

# Command-line arguments (argv[0] is the script path itself).
print('命令行参数是:')
for arg in sys.argv:
    print(arg)

# Module search path used for imports.
print("python path is in ", sys.path)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
moddevices/mod-devel-cli
refs/heads/master
{"/modcli/bundle.py": ["/modcli/utils.py"], "/modcli/config.py": ["/modcli/utils.py"]}
└── ├── modcli │ ├── __init__.py │ ├── auth.py │ ├── bundle.py │ ├── cli.py │ ├── config.py │ ├── settings.py │ └── utils.py └── setup.py
/modcli/__init__.py
# Package metadata and the shared CLI context, loaded once at import time.
from modcli import config

__version__ = '1.1.3'

# Global configuration context read from the user's config directory;
# shared by every CLI command via ``from modcli import context``.
context = config.read_context()
/modcli/auth.py
import socket
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib import parse

import click
import requests
from click import Abort

from modcli import __version__


def login(username: str, password: str, api_url: str):
    """Authenticate with username/password; return the JWT token string.

    Raises:
        Exception: when the API rejects the request (non-200 response).
    """
    result = requests.post('{0}/users/tokens'.format(api_url), json={
        'user_id': username,
        'password': password,
        'agent': 'modcli:{0}'.format(__version__),
    })
    if result.status_code != 200:
        raise Exception('Error: {0}'.format(result.json()['error-message']))
    return result.json()['message'].strip()


def get_open_port():
    """Return a TCP port that was free at probe time.

    NOTE(review): the port is released before the caller re-binds it, so a
    race with other processes is possible; acceptable for a local,
    short-lived SSO callback server.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    s.listen(1)
    port = s.getsockname()[1]
    s.close()
    return port


def login_sso_detached(api_url: str):
    """Interactive SSO flow that does not open a local browser.

    Prints manual instructions and prompts the user to paste the token.
    Exits the process on prompt abort (Ctrl-C).
    """
    click.echo('Running in detached mode...')
    click.echo('1) Open this url in any browser: {0}'.format('{0}/users/tokens_sso'.format(api_url)))
    click.echo('2) The URL will automatically redirect to MOD Forum (https://forum.moddevices.com)')
    click.echo('3) Once MOD Forum page loads, if asked, enter your credentials or register a new user')
    click.echo('4) A JWT token will be displayed in your browser')
    try:
        token = click.prompt('Copy the token value and paste it here, then press ENTER')
        return token.strip()
    except Abort:
        exit(1)


def login_sso(api_url: str):
    """SSO flow via a temporary local HTTP server and the default browser.

    Opens the SSO page and waits (up to 30s) for a single redirect carrying
    ``?token=...``; returns the received token.

    Raises:
        Exception: when no token was received.
    """
    server_host = 'localhost'
    server_port = get_open_port()
    local_server = 'http://{0}:{1}'.format(server_host, server_port)

    class SSORequestHandler(BaseHTTPRequestHandler):
        # Class attribute: written by do_GET, read by the enclosing function.
        token = ''

        def do_HEAD(self):
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()

        def do_GET(self):
            response = self.handle_http(200)
            _, _, _, query, _ = parse.urlsplit(self.path)
            result = parse.parse_qs(query)
            tokens = result.get('token', None)
            # BUG FIX: the original evaluated ``len(tokens)`` even when the
            # query carried no 'token' parameter (tokens is None), raising
            # TypeError inside the request handler.
            SSORequestHandler.token = tokens[0] if tokens else None
            self.wfile.write(response)

        def handle_http(self, status_code):
            self.send_response(status_code)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            content = '''
<html><head><title>modcli - success</title></head>
<body>Authentication successful! This browser window can be closed.</body></html>
'''
            return bytes(content, 'UTF-8')

        def log_message(self, format, *args):
            # Silence the default per-request stderr logging.
            pass

    httpd = HTTPServer((server_host, server_port), SSORequestHandler)
    httpd.timeout = 30
    webbrowser.open('{0}/users/tokens_sso?local_url={1}'.format(api_url, local_server))
    try:
        # Serve exactly one request (the SSO redirect) or time out.
        httpd.handle_request()
    except KeyboardInterrupt:
        pass

    token = SSORequestHandler.token
    if not token:
        raise Exception('Authentication failed!')
    return token
/modcli/bundle.py
import os
import shutil
import subprocess
import tempfile
from hashlib import md5

import click
import crayons
import requests

from modcli import context
from modcli.utils import read_json_file


def publish(project_file: str, packages_path: str, keep_environment: bool=False, bundles: list=None,
            show_result: bool=False, rebuild: bool=False, env_name: str=None, force: bool=False):
    """Compile and publish LV2 bundles described by a buildroot project file.

    Steps: validate inputs and authentication, load the project JSON,
    locate the buildroot package directory, tar it, create a release
    process on the bundle server, upload the tarball and verify its MD5
    checksum, then optionally fetch and print the release process.

    Raises:
        Exception: on any validation, authentication, packaging or API error.
    """
    project_file = os.path.realpath(project_file)
    packages_path = os.path.realpath(packages_path) if packages_path else None
    env = context.get_env(env_name)
    if not env.token:
        raise Exception('You must authenticate first')
    if not os.path.isfile(project_file):
        raise Exception('File {0} not found or not a valid file'.format(project_file))
    if packages_path:
        if not os.path.isdir(packages_path):
            raise Exception('Packages path {0} not found'.format(packages_path))
    else:
        # Default: look for packages next to the project file.
        packages_path = os.path.dirname(project_file)
    project = os.path.split(project_file)[1]
    if not force and not click.confirm('Project {0} will be compiled and published in [{1}], '
                                       'do you confirm?'.format(crayons.green(project), crayons.green(env.name))):
        raise Exception('Cancelled')
    process = read_json_file(project_file)
    # setting up process data
    if keep_environment:
        process['keep_environment'] = True
    process['rebuild'] = rebuild
    buildroot_pkg = process.pop('buildroot_pkg', None)
    mk_filename = '{0}.mk'.format(buildroot_pkg)
    if not buildroot_pkg:
        raise Exception('Missing buildroot_pkg in project file')
    if bundles:
        # Restrict the release to the requested bundle names only.
        process['bundles'] = [b for b in process['bundles'] if b['name'] in bundles]
        if not process['bundles']:
            raise Exception('Could not match any bundle from: {0}'.format(bundles))
    # find buildroot_pkg under packages_path
    mk_path = next((i[0] for i in os.walk(packages_path) if mk_filename in i[2]), None)
    if not mk_path:
        raise Exception('Could not find buildroot mk file for package {0} in {1}'.format(buildroot_pkg, packages_path))
    basename = os.path.basename(mk_path)
    if basename != buildroot_pkg:
        raise Exception('The package folder containing the .mk file has to be named {0}'.format(buildroot_pkg))
    pkg_path = os.path.dirname(mk_path)
    work_dir = tempfile.mkdtemp()
    try:
        package = '{0}.tar.gz'.format(buildroot_pkg)
        source_path = os.path.join(work_dir, package)
        try:
            # tar flags: z=gzip, h=follow symlinks, c=create, f=file.
            subprocess.check_output(
                ['tar', 'zhcf', source_path, buildroot_pkg],
                stderr=subprocess.STDOUT,
                cwd=os.path.join(pkg_path)
            )
        except subprocess.CalledProcessError as ex:
            raise Exception(ex.output.decode())
        click.echo('Submitting release process for project {0} using file {1}'.format(project_file, package))
        click.echo('URL: {0}'.format(env.bundle_url))
        headers = {'Authorization': 'MOD {0}'.format(env.token)}
        result = requests.post('{0}/'.format(env.bundle_url), json=process, headers=headers)
        if result.status_code == 401:
            raise Exception('Invalid token - please authenticate (see \'modcli auth\')')
        elif result.status_code != 200:
            raise Exception('Error: {0}'.format(result.text))
        release_process = result.json()
        click.echo('Release process created: {0}'.format(release_process['id']))
        click.echo('Uploading buildroot package {0} ...'.format(package))
        with open(source_path, 'rb') as fh:
            data = fh.read()
        headers = {'Content-Type': 'application/octet-stream'}
        result = requests.post(release_process['source-href'], data=data, headers=headers)
        if result.status_code == 401:
            raise Exception('Invalid token - please authenticate (see \'modcli auth\')')
        elif result.status_code != 201:
            raise Exception('Error: {0}'.format(result.text))
        # Server echoes the MD5 of what it stored; compare with ours.
        checksum = result.text.lstrip('"').rstrip('"')
        result_checksum = md5(data).hexdigest()
        if checksum == result_checksum:
            click.echo('Checksum match ok!')
        else:
            raise Exception('Checksum mismatch: {0} <> {1}'.format(checksum, result_checksum))
    finally:
        # Always drop the temporary tarball directory.
        click.echo('Cleaning up...')
        shutil.rmtree(work_dir, ignore_errors=True)
    release_process_url = release_process['href']
    click.echo(crayons.blue('Process url: {0}?pretty=true'.format(release_process_url)))
    click.echo(crayons.green('Done'))
    if show_result:
        click.echo('Retrieving release process from {0} ...'.format(release_process_url))
        release_process_full = requests.get('{0}?pretty=true'.format(release_process_url)).text
        click.echo(crayons.blue('================ Release Process {0} ================'.format(release_process['id'])))
        click.echo(release_process_full)
        click.echo(crayons.blue('================ End Release Process ================'))
/modcli/cli.py
import click
import crayons

from modcli import context, auth, __version__, bundle

# Shown before the SSO flow so the user knows browser credentials are used.
_sso_disclaimer = '''SSO login requires you have a valid account in MOD Forum (https://forum.moddevices.com). If your browser has an active session the credentials will be used for this login. Confirm?'''


@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(prog_name='modcli', version=__version__)
def main():
    # Root command group; subgroups are attached at the bottom of the module.
    pass


@click.group(name='auth', help='Authentication commands')
def auth_group():
    pass


@click.group(name='bundle', help='LV2 bundle commands')
def bundle_group():
    pass


@click.group(name='config', help='Configuration commands')
def config_group():
    pass


@click.command(help='Authenticate user with SSO (MOD Forum)')
@click.option('-s', '--show-token', type=bool, help='Print the JWT token obtained', is_flag=True)
@click.option('-o', '--one-time', type=bool, help='Only print token once (do not store it)', is_flag=True)
@click.option('-y', '--confirm-all', type=bool, help='Confirm all operations', is_flag=True)
@click.option('-d', '--detached-mode', type=bool, help='Run process without opening a local browser', is_flag=True)
@click.option('-e', '--env_name', type=str, help='Switch to environment before authenticating')
def login_sso(show_token: bool, one_time: bool, confirm_all: bool, detached_mode: bool, env_name: str):
    # SSO login: obtain a token via MOD Forum, then store and/or print it.
    if env_name:
        context.set_active_env(env_name)
    env = context.current_env()
    if not confirm_all:
        response = click.confirm(_sso_disclaimer)
        if not response:
            exit(1)
    if not one_time:
        click.echo('Logging in to [{0}]...'.format(env.name))
    try:
        if detached_mode:
            token = auth.login_sso_detached(env.api_url)
        else:
            token = auth.login_sso(env.api_url)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    if not one_time:
        # Persist the token in the active environment.
        env.set_token(token)
        context.save()
    if show_token or one_time:
        print(token.strip())
    else:
        click.echo(crayons.green('You\'re now logged in as [{0}] in [{1}].'.format(env.username, env.name)))


@click.command(help='Authenticate user')
@click.option('-u', '--username', type=str, prompt=True, help='User ID')
@click.option('-p', '--password', type=str, prompt=True, hide_input=True, help='User password')
@click.option('-s', '--show-token', type=bool, help='Print the JWT token obtained', is_flag=True)
@click.option('-o', '--one-time', type=bool, help='Only print token once (do not store it)', is_flag=True)
@click.option('-e', '--env_name', type=str, help='Switch to environment before authenticating')
def login(username: str, password: str, show_token: bool, one_time: bool, env_name: str):
    # Username/password login against the MOD API.
    if env_name:
        context.set_active_env(env_name)
    env = context.current_env()
    if not one_time:
        click.echo('Logging in to [{0}]...'.format(env.name))
    try:
        token = auth.login(username, password, env.api_url)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    if not one_time:
        env.set_token(token)
        context.save()
    if show_token or one_time:
        print(token.strip())
    else:
        click.echo(crayons.green('You\'re now logged in as [{0}] in [{1}].'.format(username, env.name)))


@click.command(help='Remove all tokens and reset context data')
def clear_context():
    # Wipe persisted context (tokens, environments) back to defaults.
    try:
        context.clear()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    click.echo(crayons.green('Context cleared'))


@click.command(help='Show current active access JWT token')
@click.option('-e', '--env_name', type=str, help='Show current active token from a specific environment')
def active_token(env_name: str):
    # Print the stored token, or a hint to authenticate first.
    if env_name:
        context.set_active_env(env_name)
    token = context.active_token()
    if not token:
        click.echo(crayons.red('You must authenticate first.'), err=True)
        click.echo('Try:\n $ modcli auth login')
        exit(1)
        return
    click.echo(token)


@click.command(help='Set active environment, where ENV_NAME is the name')
@click.argument('env_name')
def set_active_env(env_name: str):
    try:
        context.set_active_env(env_name)
        context.save()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    click.echo(crayons.green('Current environment set to: {0}'.format(env_name)))


@click.command(help='Add new environment, where ENV_NAME is the name, API_URL '
                    'and BUNDLE_URL are the API entry points')
@click.argument('env_name')
@click.argument('api_url')
@click.argument('bundle_url')
def add_env(env_name: str, api_url: str, bundle_url: str):
    # Register a new environment and immediately make it the active one.
    try:
        context.add_env(env_name, api_url, bundle_url)
        context.set_active_env(env_name)
        context.save()
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return
    click.echo(crayons.green('Environment [{0}] added and set as active'.format(env_name)))


@click.command(help='List current configuration', name='list')
def list_config():
    # Summarize the active environment and authentication state.
    env = context.current_env()
    click.echo('Active environment: {0}'.format(env.name))
    click.echo('Authenticated in [{0}]: {1}'.format(env.name, 'Yes' if env.token else 'No'))
    click.echo('Registered environments: {0}'.format(list(context.environments.keys())))


@click.command(help='Publish LV2 bundles, where PROJECT_FILE points to the buildroot project descriptor file (JSON)')
@click.argument('project_file')
@click.option('-p', '--packages-path', type=str, help='Path to buildroot package')
@click.option('-s', '--show-result', type=bool, help='Print pipeline process result', is_flag=True)
@click.option('-k', '--keep-environment', type=bool, help='Don\'t remove build environment after build', is_flag=True)
@click.option('-r', '--rebuild', type=bool, help='Don\'t increment release number, just rebuild', is_flag=True)
@click.option('-e', '--env', type=str, help='Environment where the bundles will be published')
@click.option('-f', '--force', type=bool, help='Don\'t ask for confirmation', is_flag=True)
def publish(project_file: str, packages_path: str, show_result: bool, keep_environment: bool, rebuild: bool,
            env: str, force: bool):
    # Thin CLI wrapper over bundle.publish with uniform error reporting.
    try:
        bundle.publish(project_file, packages_path, show_result=show_result, keep_environment=keep_environment,
                       rebuild=rebuild, env_name=env, force=force)
    except Exception as ex:
        click.echo(crayons.red(str(ex)), err=True)
        exit(1)
        return


# Wire subcommands into their groups and the groups into the root command.
auth_group.add_command(active_token)
auth_group.add_command(login)
auth_group.add_command(login_sso)
bundle_group.add_command(publish)
config_group.add_command(add_env)
config_group.add_command(set_active_env)
config_group.add_command(list_config)
config_group.add_command(clear_context)
main.add_command(auth_group)
main.add_command(bundle_group)
main.add_command(config_group)

if __name__ == '__main__':
    main()
/modcli/config.py
import base64
import json
import os
import stat
import re

from modcli import settings
from modcli.utils import read_json_file


def read_context():
    """Load the persisted CLI context, seeding the default environments on first run."""
    context = CliContext.read(settings.CONFIG_DIR)
    if len(context.environments) == 0:
        for env_name, urls in settings.URLS.items():
            context.add_env(env_name, urls[0], urls[1])
        context.set_active_env(settings.DEFAULT_ENV)
        context.save()
    return context


def clear_context():
    """Remove all persisted context files from the configuration directory."""
    # Fix: CliContext.clear() is an instance method that reads self._path, so
    # the previous CliContext.clear(settings.CONFIG_DIR) call (a string as
    # `self`) raised AttributeError. Build a context bound to the dir instead.
    CliContext(settings.CONFIG_DIR).clear()


def _write_file(path: str, data: str, remove_existing: bool=True):
    """Write *data* to *path* with owner-only permissions (0600)."""
    # create dir if doesn't exist
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname, exist_ok=True)
    # remove previous file so the restrictive mode below is applied fresh
    if remove_existing:
        if os.path.isfile(path):
            os.remove(path)
    # open with S_IRUSR|S_IWUSR because the file may hold auth tokens
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR), 'w') as fh:
        fh.write(data)
        fh.writelines(os.linesep)


def _write_json_file(path: str, data: dict, remove_existing: bool=True):
    """Serialize *data* as indented JSON into *path*."""
    _write_file(path, json.dumps(data, indent=4), remove_existing)


def _remove_file(path: str):
    """Delete *path* if it exists; no-op otherwise."""
    if os.path.isfile(path):
        os.remove(path)


class CliContext(object):
    """Persistent modcli state: registered environments and the active one."""

    _filename = 'context.json'
    _access_token_filename = 'access_token'

    @staticmethod
    def read(path: str):
        """Build a CliContext from the JSON file stored under *path*.

        Returns an empty context when the file is absent or empty.
        """
        context = CliContext(path)
        data = read_json_file(os.path.join(path, CliContext._filename))
        if not data:
            return context
        for env_data in data['environments']:
            context.add_env(env_data['name'], env_data['api_url'], env_data['bundle_url'])
            env = context.environments[env_data['name']]
            env.username = env_data['username']
            env.token = env_data['token']
            env.exp = env_data['exp']
        context.set_active_env(data['active_env'])
        return context

    def __init__(self, path: str):
        self._path = path          # directory holding the persisted files
        self._active_env = ''      # name of the active environment ('' = none)
        self.environments = {}     # name -> EnvSettings

    def _ensure_env(self, env_name: str):
        """Raise when *env_name* is not a registered environment."""
        if env_name not in self.environments:
            raise Exception('Environment {0} doesn\'t exist'.format(env_name))

    def set_active_env(self, env_name: str):
        """Mark *env_name* as active; an empty name deactivates all environments."""
        if not env_name:
            self._active_env = ''
        else:
            self._ensure_env(env_name)
            self._active_env = env_name

    def add_env(self, env_name: str, api_url: str, bundle_url: str):
        """Register a new environment after validating its name and URLs."""
        if not env_name:
            raise Exception('Environment name is invalid')
        if env_name in self.environments:
            raise Exception('Environment {0} already exists'.format(env_name))
        if not re.match(r'https?://.*', api_url):
            raise Exception('Invalid api_url: {0}'.format(api_url))
        if not re.match(r'https?://.*', bundle_url):
            # Fix: this message previously said 'api_url' for a bad bundle_url
            raise Exception('Invalid bundle_url: {0}'.format(bundle_url))
        self.environments[env_name] = EnvSettings(env_name, api_url, bundle_url)

    def remove_env(self, env_name: str):
        """Unregister *env_name*; raises when it does not exist."""
        self._ensure_env(env_name)
        del self.environments[env_name]

    def active_token(self):
        """Return the auth token of the active environment ('' when not logged in)."""
        return self.current_env().token

    def current_env(self):
        """Return the active EnvSettings; raises when nothing is active."""
        if not self._active_env:
            raise Exception('No environment has been set')
        return self.environments[self._active_env]

    def get_env(self, env_name: str=None):
        """Return the named environment, or the active one when no name is given."""
        if not env_name:
            return self.current_env()
        self._ensure_env(env_name)
        return self.environments[env_name]

    def save(self):
        """Persist the context (and the active token, if any) to disk."""
        data = {
            'active_env': self._active_env,
            'environments': list({
                'name': e.name,
                'api_url': e.api_url,
                'bundle_url': e.bundle_url,
                'username': e.username,
                'token': e.token,
                'exp': e.exp,
            } for e in self.environments.values())
        }
        _write_json_file(os.path.join(self._path, CliContext._filename), data)
        # Guard: active_token() raises when no environment is active, which
        # previously made save() crash after set_active_env('').
        active_token = self.active_token() if self._active_env else ''
        if active_token:
            _write_file(os.path.join(self._path, CliContext._access_token_filename), active_token)
        else:
            _remove_file(os.path.join(self._path, CliContext._access_token_filename))

    def clear(self):
        """Remove the persisted files and forget all environments."""
        _remove_file(os.path.join(self._path, CliContext._filename))
        _remove_file(os.path.join(self._path, CliContext._access_token_filename))
        self.environments.clear()


class EnvSettings(object):
    """Connection settings and credentials for a single environment."""

    def __init__(self, name: str, api_url: str, bundle_url: str):
        self.name = name
        self.api_url = api_url.rstrip('/')
        self.bundle_url = bundle_url.rstrip('/')
        self.username = ''
        self.token = ''
        self.exp = ''

    def set_token(self, token: str):
        """Store a JWT and extract the username and expiry from its payload."""
        _, payload, _ = token.split('.')
        # Fix: JWT payloads use the base64url alphabet ('-'/'_'); the standard
        # b64decode silently discards those characters, corrupting the decode.
        # The extra '===' compensates for the stripped padding (a2b_base64
        # tolerates excess padding).
        payload_data = json.loads(base64.urlsafe_b64decode(payload + '===').decode())
        username = payload_data['user_id']
        exp = payload_data.get('exp', None)
        self.username = username
        self.token = token
        self.exp = exp
/modcli/settings.py
import os CONFIG_DIR = os.path.expanduser('~/.config/modcli') URLS = { 'labs': ('https://api-labs.moddevices.com/v2', 'https://pipeline-labs.moddevices.com/bundle/'), 'dev': ('https://api-dev.moddevices.com/v2', 'https://pipeline-dev.moddevices.com/bundle/'), } DEFAULT_ENV = 'labs'
/modcli/utils.py
import json import os def read_json_file(path: str): if not os.path.isfile(path): return {} with open(path, 'r') as file: contents = file.read() return json.loads(contents)
/setup.py
import re import sys from setuptools import setup with open('modcli/__init__.py', 'r') as fh: version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fh.read(), re.MULTILINE).group(1) if sys.version_info[0] < 3: raise Exception("Must be using Python 3") setup( name='mod-devel-cli', python_requires='>=3', version=version, description='MOD Command Line Interface', author='Alexandre Cunha', author_email='alex@moddevices.com', license='Proprietary', install_requires=[ 'click==6.7', 'crayons==0.1.2', 'requests>=2.18.4', ], packages=[ 'modcli', ], entry_points={ 'console_scripts': [ 'modcli = modcli.cli:main', ] }, classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', ], url='http://moddevices.com/', )
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
limkokholefork/Answerable
refs/heads/main
{"/tools/cache.py": ["/tools/log.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/spider.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/log.py": ["/tools/displayer.py"]}
└── ├── answerable.py ├── models │ ├── content_based_0.py │ └── content_based_1.py └── tools ├── cache.py ├── displayer.py ├── fetcher.py ├── log.py ├── spider.py └── statistics.py
/answerable.py
import re
import json
import argparse
import datetime
import textwrap
import importlib

from urllib.error import URLError

from tools import fetcher, displayer, log, spider

# Version tag compared against the latest GitHub release at startup.
_current_version = "v1.1"


def latest_version():
    """Return the latest release tag published on GitHub, or None on failure."""
    try:
        res = spider.get(
            "https://api.github.com/repos/MiguelMJ/Answerable/releases/latest", 0
        )
        if res.status_code != 200:
            log.warn("Unable to get information from latest version")
            return None
        latest = re.search(r"v[\d.]+.?", json.loads(res.content)["name"])[0]
        return latest
    except URLError:
        log.warn("Unable to get information from latest version")
        return None


# File where `save` persists the user id, tags and chosen model.
_config_file = ".config"


def get_user_tags(args):
    """Return the tags parsed from the file given with -t, or None.

    If the user used the -t option, parse the specified file. Otherwise,
    return None.
    """
    if args.tags is not None:
        return fetcher.get_user_tags(args.tags)
    else:
        log.log("No tags file provided.")
        return None


def load_config(args) -> dict:
    """Return the effective user configuration.

    Precedence (lowest to highest): built-in defaults, the _config_file
    contents (if readable), then the -u/-t/-m command line options.
    Aborts when no user id is available from any source.
    """
    try:
        with open(_config_file) as fh:
            file_config = json.load(fh)
    except IOError:
        file_config = {}
    finally:
        # Merge the three layers; later dicts override earlier ones.
        default_config = {"model": "content_based_1"}
        cli_config = {"user": args.user, "tags": args.tags, "model": args.model}
        cli_config = {k: v for k, v in cli_config.items() if v is not None}
        config = {**default_config, **file_config, **cli_config}
    if config["user"] is None:
        log.abort(".config not found: provide user id with -u option")
    return config


def save_config(args):
    """Create or overwrite _config_file from the -u, -t and -m options."""
    with open(_config_file, "w") as fh:
        tags = get_user_tags(args)
        json.dump(
            {"user": args.user, "tags": tags, "model": args.model or "content_based_1"},
            fh,
            indent=2,
        )
    log.log("Configuration saved in {}", _config_file)


def summary(args):
    """Display a summary of the answered questions."""
    config = load_config(args)
    qa = fetcher.get_QA(config["user"], force_reload=args.f)
    # Keep only question/answer pairs that actually have an answer.
    qa = [(q, a) for q, a in qa if a is not None]
    displayer.disp_statistics(qa)


def recommend(args):
    """Recommend questions from the latest unanswered feed entries."""
    # Counters for entries dropped by valid_entry, reported at the end.
    filtered = {"hidden": 0, "closed": 0, "duplicate": 0}

    def valid_entry(entry):
        """Check if an entry should be taken into account.

        NOTE: reads `hide_tags`, which is bound later in the enclosing
        scope before this closure is first called.
        """
        if len(set(entry["tags"]) & hide_tags) > 0:
            filtered["hidden"] += 1
            return False
        if entry["title"][-8:] == "[closed]":
            filtered["closed"] += 1
            return False
        if entry["title"][-11:] == "[duplicate]":
            filtered["duplicate"] += 1
            return False
        return True

    def cf(x):
        """Color a counter green when zero, magenta otherwise."""
        return (
            displayer.fg(x, displayer.green)
            if x == 0
            else displayer.fg(x, displayer.magenta)
        )

    # Load configuration
    config = load_config(args)
    # Load the model (a module under models/ exposing recommend(user_qa, feed))
    try:
        model_name = config["model"]
        log.log("Loading model {}", displayer.fg(model_name, displayer.yellow))
        model = importlib.import_module(f".{model_name}", "models")
        log.log(
            "Model {} succesfully loaded", displayer.fg(model_name, displayer.green)
        )
    except ModuleNotFoundError as err:
        # Distinguish a missing model from a model with missing dependencies.
        if err.name == f"models.{model_name}":
            log.abort("Model {} not present", model_name)
        else:
            log.abort("Model {} unsatisfied dependency: {}", model_name, err.name)
    # Get user info and feed
    user_qa = fetcher.get_QA(config["user"], force_reload=args.f)
    if args.all or "tags" not in config:
        tags = ""
    else:
        # Build the Stack Overflow feed query from the followed tags.
        tags = "tag?tagnames="
        tags += "%20or%20".join(config["tags"]["followed"]).replace("+", "%2b")
        tags += "&sort=newest"
    url = "https://stackoverflow.com/feeds/" + tags
    try:
        feed = fetcher.get_question_feed(url, force_reload=args.F)
        if len(feed) == 0:
            raise ValueError("No feed returned")
        # Filter feed from ignored tags
        hide_tags = (
            set()
            if args.all or "tags" not in config
            else set(config["tags"]["ignored"])
        )
        useful_feed = [e for e in feed if valid_entry(e)]
        if len(useful_feed) == 0:
            raise ValueError("All feed filtered out")
        log.log(
            "Discarded: {} ignored | {} closed | {} duplicate",
            cf(filtered["hidden"]),
            cf(filtered["closed"]),
            cf(filtered["duplicate"]),
        )
        # Make the recommendation
        log.log(f"Corpus size: {len(user_qa)} Feed size: {len(useful_feed)}")
        rec_index, info = model.recommend(user_qa, useful_feed)
        selection = [useful_feed[i] for i in rec_index[: args.limit]]
        if args.info and info is None:
            log.warn("Info requested, but model {} returns None", model_name)
        elif args.info and info is not None:
            # Reorder the per-entry info to match the recommended order.
            info = [info[i] for i in rec_index[: args.limit]]
        displayer.disp_feed(selection, info, args.info)
    except ValueError as err:
        log.warn(err)
        log.print_advice()


def parse_arguments() -> argparse.Namespace:
    """Parse sys.argv into a Namespace used by the rest of the functions."""
    parser = argparse.ArgumentParser(
        usage="%(prog)s COMMAND [OPTIONS]",
        description=f"Answerable {_current_version}\nStack Overflow unanswered questions recommendation system",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(
            """\
            Code: https://github.com/MiguelMJ/Answerable
            Documentation: in https://github.com/MiguelMJ/Answerable/wiki
            """
        ),
    )
    parser.add_argument(
        "command",
        choices=("save", "summary", "recommend"),
        help="save,summary,recommend",
        metavar="COMMAND",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="show the log content in stderr too",
        action="store_true",
    )
    parser.add_argument(
        "-i",
        "--info",
        help="print extra info on each recomendation",
        action="store_true",
    )
    parser.add_argument("--no-ansi", help="print without colors", action="store_true")
    parser.add_argument("-f", help="force reload of user data", action="store_true")
    parser.add_argument(
        "-F", help="force retrieval of question feed", action="store_true"
    )
    parser.add_argument(
        "-l",
        "--limit",
        help="limit the number of items displayed",
        type=int,
        default=999,
        metavar="N",
    )
    parser.add_argument(
        "-a",
        "--all",
        help="don't use tags to filter the feed. If the user tags haven't been saved before with the <save> command, this option is on by default",
        action="store_true",
    )
    parser.add_argument(
        "-u", "--user", help="identifier of Stack Overflow user", metavar="ID"
    )
    parser.add_argument(
        "-t",
        "--tags",
        help="file with the source of the page with the user followed and ignored tags",
        metavar="FILE",
    )
    parser.add_argument(
        "-m",
        "--model",
        help="specify the recommendation model you want to use",
        metavar="MODEL",
    )
    args = parser.parse_args()
    if args.no_ansi:
        displayer.ansi = False
    return args


if __name__ == "__main__":
    # Warn (non-fatally) when a newer release exists.
    _latest_version = latest_version()
    if _latest_version is not None and _latest_version != _current_version:
        log.warn(
            f"New version on GitHub: {_latest_version} (current is {_current_version})"
        )
    # Dispatch table: COMMAND -> handler.
    switch = {
        "save": save_config,
        "summary": summary,
        "recommend": recommend,
    }
    args = parse_arguments()
    command = args.command
    log.add_log("answerable.log")
    if args.verbose:
        log.add_stderr()
    log.log(displayer.bold("Log of {}"), datetime.datetime.now())
    switch[command](args)
    log.close_logs()
/models/content_based_0.py
"""Recommender Tool for Answerable This file contains the recommendation algorithm. """ from bs4 import BeautifulSoup as bs from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.metrics.pairwise import linear_kernel def recommend(user_qa, feed): answered = [ x[0]["title"] + " " + bs(x[0]["body"], "html.parser").getText(" ", strip=True) for x in user_qa ] tags_ans = [" ".join(x[0]["tags"]) for x in user_qa] questions = [x["title"] + x["body"] for x in feed] tags_unans = [" ".join(x["tags"]) for x in feed] nans = len(answered) nunans = len(questions) """ The following code is an adapted version of the Content-Based recommmender described in this tutorial: https://www.datacamp.com/community/tutorials/recommender-systems-python """ tfidf = TfidfVectorizer(stop_words="english") count = CountVectorizer(stop_words="english") # list of vectorized body and tags tfidf_matrix = tfidf.fit_transform(answered + questions) count_matrix = count.fit_transform(tags_ans + tags_unans) # similarity matrices: without and with tags cosine_sim_body = linear_kernel(tfidf_matrix, tfidf_matrix) cosine_sim_tags = linear_kernel(count_matrix, count_matrix) + cosine_sim_body # rows: unanswered, cols: answered unans_similarity_body = cosine_sim_body[nans:, :nans] unans_similarity_tags = cosine_sim_tags[nans:, :nans] # form of the following lists: [(feed index, value)] sum_sim_body = enumerate([sum(r) for r in unans_similarity_body]) max_sim_body = enumerate([max(r) for r in unans_similarity_body]) sum_sim_tags = enumerate([sum(r) for r in unans_similarity_tags]) max_sim_tags = enumerate([max(r) for r in unans_similarity_tags]) # sort the indices by the value sort_sum_sim_body = sorted(sum_sim_body, key=lambda x: x[1], reverse=True) sort_max_sim_body = sorted(max_sim_body, key=lambda x: x[1], reverse=True) sort_sum_sim_tags = sorted(sum_sim_tags, key=lambda x: x[1], reverse=True) sort_max_sim_tags = sorted(max_sim_tags, key=lambda x: x[1], reverse=True) # map 
each index to its classifications by_sum_body = {x[0]: i for i, x in enumerate(sort_sum_sim_body)} by_max_body = {x[0]: i for i, x in enumerate(sort_max_sim_body)} by_sum_tags = {x[0]: i for i, x in enumerate(sort_sum_sim_tags)} by_max_tags = {x[0]: i for i, x in enumerate(sort_max_sim_tags)} # compute the mean classification for each index mean_index = [] for i in range(nunans): mean = (by_sum_body[i] + by_sum_tags[i] + by_max_body[i] + by_max_tags[i]) / 4 mean_index.append((mean, i)) # build the final recommended feed order by_mean = [x[1] for x in sorted(mean_index)] return by_mean, None
/models/content_based_1.py
"""Recommender Tool for Answerable This file contains the recommendation algorithm. """ import tools.displayer from bs4 import BeautifulSoup as bs from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel import numpy as np import re def preprocessed_text_from_html(html): soup = bs(html, "html.parser") for tag in soup.findAll(name="code"): tag.decompose() text = soup.getText(" ", strip=True) text = re.sub(r"\d+", "", text) text = " ".join(re.findall(r"[\w+_]+", text)) return text.lower() def recommend(user_qa, feed): answered = [ " ".join(x["tags"]) + " " + x["title"].lower() + " " + preprocessed_text_from_html(x["body"]) for [x, _] in user_qa ] unanswered = [ " ".join(x["tags"]) + " " + x["title"].lower() + " " + preprocessed_text_from_html(x["body"]) for x in feed ] nans = len(answered) tfidf = TfidfVectorizer(stop_words="english") # list of vectorized text tfidf_matrix = tfidf.fit_transform(answered + unanswered) # similarity matrix of each answer with the rest cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix) # rows: unanswered, cols: answered unans_similarity = cosine_sim[nans:, :nans] # index: unanswered. 
values: max similarity, text size and score max_sim = list(enumerate([max(r) for r in unans_similarity])) unans_sizes = [len(u.split()) for u in unanswered] score = [x * x * unans_sizes[i] for i, x in max_sim] # sort the indices by the value by_score = sorted(list(enumerate(score)), key=lambda x: x[1], reverse=True) # relation between index in feed and index of closest answered closest = [ (i, np.where(np.isclose(unans_similarity[i], v))[0][0]) for i, v in max_sim ] # store displayable information b = tools.displayer.bold info_f = "{}: {{}}\n{}:{{}} {}: {{}} {}: {{}}".format( b("Closest"), b("Text size"), b("Similarity"), b("Score"), ) info = [] for unans, ans in closest: info.append( info_f.format( user_qa[ans][0]["title"], unans_sizes[unans], f"{100*max_sim[unans][1]:.2f}%", f"{score[unans]:.2f}", ) ) # get the indexes, now sorted sorted_index = [x[0] for x in by_score] return sorted_index, info
/tools/cache.py
"""Cache Tool for Answerable This file contains the functions to access and modify cached content. It may be used by different modules, so each function requires a category argument to avoid collisions. As every function is intended to serve a secondary role in extern functions, the logs have an extra level of indentation. """ import json import pathlib from datetime import datetime as dt from datetime import timedelta as td from tools.log import log from tools.displayer import fg, green, magenta __cache_dir = ".cache" def check(category: str, _file: str, max_delta: td) -> (bool, pathlib.Path): """Return if a file is cached and where it is located. Returns: (B, P) where - B is true if the content is cached and usable - P is the path where the cached content is/should be. Parameters: category: Folder inside the cache. _file: File name to look for. max_delta: Timedelta used as threshold to consider a file too old. """ # Prepare the path to the cached file subpath = pathlib.Path(category) / _file path = pathlib.Path.cwd() / __cache_dir / subpath path.parent.mkdir(parents=True, exist_ok=True) try: if not path.exists(): log(" Miss {}", fg(subpath, magenta)) return False, path else: # Check if the file is too old log(" Hit {}", fg(subpath, green)) modified = dt.fromtimestamp(path.stat().st_mtime) now = dt.now() delta = now - modified log(" Time passed since last fetch: {}", delta) valid = delta < max_delta if valid: log(fg(" Recent enough", green)) else: log(fg(" Too old", magenta)) return valid, path except OSError as err: log(" {}: {}", err, fg(subpath, magenta)) return False, path def update(category: str, _file: str, obj, json_format=True): """Update or create a file in the cache Parameters: category: Folder inside the cache. _file: File name to store in. obj: Serializable object to store. 
""" subpath = pathlib.Path(category) / _file path = pathlib.Path.cwd() / __cache_dir / subpath path.parent.mkdir(parents=True, exist_ok=True) try: with open(path, "w") as fh: if json_format: json.dump(obj, fh, indent=2) else: fh.write(obj) log(" Cache updated: {}", fg(subpath, green)) except OSError as err: log(" {}: {}", err, fg(subpath, magenta)) return False, path
/tools/displayer.py
"""Displayer Tool for Answerable This file contains the functions and variables used to present the data. """ import tools.statistics as st # # COLOR RELATED VARIABLES AND FUNCTIONS # red = (250, 0, 0) green = (0, 250, 0) blue = (0, 0, 250) cyan = (0, 250, 250) magenta = (250, 0, 250) yellow = (250, 250, 0) """ white = (250, 250, 250) gray1 = (200, 200, 200) gray2 = (150, 150, 150) gray3 = (100, 100, 100) gray4 = (50, 50, 50) black = (0, 0, 0) """ def lighten(c, r): dr = (250 - c[0]) * r dg = (250 - c[1]) * r db = (250 - c[2]) * r return (int(c[0] + dr), int(c[1] + dg), int(c[2] + db)) def darken(c, r): dr = c[0] * r dg = c[1] * r db = c[2] * r return (int(c[0] - dr), int(c[1] - dg), int(c[2] - db)) """ def interpolate(c, d, r): dr = (d[0] - c[0]) * r dg = (d[1] - c[1]) * r db = (d[2] - c[2]) * r return (int(c[0] + dr), int(c[1] + dg), int(c[2] + db)) """ # # ANSI RELATED VARIABLES AND FUNCTIONS # ansi = True def bold(msg): if not ansi: return msg return "\033[1m{}\033[0m".format(msg) def fg(msg, color): if not ansi: return msg return "\033[38;2;{:03};{:03};{:03}m{}\033[0m".format( color[0], color[1], color[2], msg ) def bg(msg, color): if not ansi: return msg return "\033[48;2;{:03};{:03};{:03}m{}\033[0m".format( color[0], color[1], color[2], msg ) def color(msg, fgc, bgc): return bg(fg(msg, fgc), bgc) # # DATA DISPLAY FUNCTIONS # def disp_feed(feed, info, print_info=False): def title(x): return fg(bold(x), lighten(blue, 0.3)) def tag(x): return fg(f"[{x}]", darken(cyan, 0.2)) for i in range(len(feed)): entry = feed[i] print("o", title(entry["title"])) print(" ", " ".join(tag(t) for t in entry["tags"])) print(" ", entry["link"]) if print_info and info is not None: print(" ", info[i].replace("\n", "\n ")) def table(data, align=""): cols = len(data[0]) widths = [] for i in range(0, cols): col = [x[i] for x in data] widths.append(max([len(str(c)) for c in col])) row_f = " ".join(["{{:{}{}}}".format(align, w) for w in widths]) for d in data: print(row_f.format(*d)) 
def disp_statistics(user_qa):
    """Pretty-print every metric defined in tools.statistics for a user.

    user_qa: list of (question, answer) pairs as produced by the fetcher;
    each answer dict carries "title", "tags", and the metric fields.
    """
    # Reusable colored format templates, filled per value below.
    ans_f = fg("{}", lighten(blue, 0.3))
    tag_f = fg("[{}]", darken(cyan, 0.2))
    val_f = bold(fg("{}", green))

    def print_section(txt):
        # Section headers: uppercase, bold, followed by a blank line.
        print(bold(txt.upper()))
        print()

    def print_metric(txt):
        def mark(x):
            return bold(x)

        print(mark(txt))

    def print_answer_and_value(answer, value):
        # One line with the value and title, one indented line with the tags.
        tags = answer["tags"]
        print(val_f.format(value), ans_f.format(answer["title"]))
        print(" " * len(str(value)), " ".join([tag_f.format(t) for t in tags]))

    # Metrics that only need the answers, not the questions.
    user_answers = [a for q, a in user_qa]

    print_section("Answer metrics")
    metrics = [
        (bold(k), val_f.format(m(user_answers))) for k, m in st.answer_metrics_single
    ]
    table(metrics)
    print()
    for (name, metric, key) in st.answer_metrics_tops:
        print_metric(name)
        results = metric(user_answers)
        for a in results:
            print_answer_and_value(a, key(a))
        print()

    print_section("Tag metrics")
    for (name, metric) in st.tag_metrics:
        print_metric(name)
        results = metric(user_qa)
        # Each result is a (tag, value) pair; colorize both columns.
        results = [(tag_f.format(r[0]), val_f.format(r[1])) for r in results]
        table(results)
        print()

    print_section("Reputation metrics")
    metrics = [
        (bold(k), val_f.format(m(user_answers)))
        for k, m in st.reputation_metrics_single
    ]
    table(metrics)
    print()
    # reputation_weight_metrics is (weights, metric_fn, caption_templates);
    # the metric returns one value per caption, printed in order.
    for w in st.reputation_weight_metrics[0]:
        results = st.reputation_weight_metrics[1](user_answers, w)
        for i, info in enumerate(st.reputation_weight_metrics[2]):
            print_metric(info.format(w * 100))
            print(val_f.format(results[i]))
/tools/fetcher.py
"""Fetcher Tool for Answerable

This file contains the high level functions in charge of data retrieval.
It provides an interface between the spider/crawler and another level of
cacheable information.
"""

import math
import json
from datetime import timedelta as td

from bs4 import BeautifulSoup

from tools import spider, cache
from tools.log import log, abort
from tools.displayer import fg, magenta, green, bold

cache_where = "fetcher"
cache_threshold = td(hours=12)


def get_questions(question_ids):
    """Retrieve questions from Stack Overflow.

    - question_ids: list of question IDs (strings)

    Returns a list of objects with the following attributes:
    {
      "tags": [string],
      "answers": [{"owner": {"user_id": int}}],
      "score": int,
      "creation_date": timestamp,
      "question_id": int,
      "link": string,
      "title": string,
      "body": string (html)
    }
    """
    # about this request:
    # https://api.stackexchange.com/docs/questions-by-ids#page=1&pagesize=100&order=desc&sort=creation&ids=67519195&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA&site=stackoverflow
    api_request_f = "https://api.stackexchange.com//2.2/questions/{}?page={}&pagesize=100&order=desc&sort=creation&site=stackoverflow&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA"
    max_ids = 100  # no more than 100 ids allowed at once
    k = math.ceil(len(question_ids) / max_ids)
    log(f"{len(question_ids)} questions, {k} batches")
    questions = []
    for i in range(k):
        log(f"batch {i+1}")
        batch_begin = i * max_ids
        batch_end = batch_begin + max_ids
        subset = ";".join(question_ids[batch_begin:batch_end])
        page = 1
        while True:
            api_request = api_request_f.format(subset, page)
            # urls too long to cache
            response = spider.get(api_request, delay=0.5, use_cache=False)
            if response.status_code != 200:
                abort(response)
            result = json.loads(response.content)
            questions += result["items"]
            if not result["has_more"]:
                break
            page += 1
    return questions


def get_user_answers(user_id, force_reload=False, max_page=math.inf):
    """Retrieve answers from a Stack Overflow user.

    - user_id: user ID
    - force_reload: bypass the spider cache
    - max_page: stop after this many 100-answer pages

    Returns a list of objects with the following attributes:
    {
      "is_accepted": bool,
      "score": int,
      "question_id": int,
      "link": string,
      "title": string,
      "body": string (html),
    }
    """
    api_request_f = "https://api.stackexchange.com/2.2/users/{}/answers?page={}&pagesize=100&order=desc&sort=activity&site=stackoverflow&filter=!37n)Y*a2Ut6eDilfH4XoIior(X(b8nm7Z-g)Tgl*A4Qdfe8Mcn-Luu"
    page = 1
    answers = []
    while page <= max_page:
        api_request = api_request_f.format(user_id, page)
        # td() (zero) forces the spider cache to be considered stale.
        response = spider.get(
            api_request, delay=0.5, max_delta=td() if force_reload else td(hours=12)
        )
        if response.status_code != 200:
            abort(response)
        result = json.loads(response.content)
        answers += result["items"]
        if not result["has_more"]:
            break
        page += 1
    return answers


def get_QA(user_id, force_reload=False, max_page=5):
    """Retrieve information about the questions answered by the user.

    Return [(Question_1, Answer_1), (Question_2, Answer_2), ...]
    Each answer is annotated with its question's "tags".
    See get_questions, get_user_answers.
    """
    log(bold("Fetching user information"))
    if force_reload:
        log(fg("Force reload", magenta))
    cache_file = str(user_id) + ".json"

    # Check cache
    if not force_reload:
        hit, fpath = cache.check(cache_where, cache_file, cache_threshold)
        if hit:
            with open(fpath) as fh:
                stored = json.load(fh)
            # JSON stores the pairs as lists; normalize to tuples so cache
            # hits have the same shape as freshly-built results.
            return [tuple(pair) for pair in stored]

    # Get the answers
    answers = get_user_answers(user_id, force_reload, max_page)

    # Get the questions
    q_ids = [str(a["question_id"]) for a in answers]
    questions = get_questions(q_ids)

    # Join answers and questions
    user_qa = [
        (q, a)
        for q in questions
        for a in answers
        if q["question_id"] == a["question_id"]
    ]

    # Copy the question tags onto each answer BEFORE writing the cache.
    # (Previously the cache was written first, so cache hits returned
    # answers without the "tags" key that the statistics module needs.)
    for q, a in user_qa:
        a["tags"] = q["tags"]

    cache.update(cache_where, cache_file, user_qa)

    # Include questions specified by user (not cached on purpose)
    try:
        with open("include.txt", "r") as f:
            extra_q_ids = f.read().split()
        log("Additional training: " + str(extra_q_ids))
        extra_questions = get_questions(extra_q_ids)
    except FileNotFoundError:
        extra_questions = []
        log("No additional training specified by user")
    user_qa += [(q, None) for q in extra_questions]

    return user_qa


def get_question_feed(url, force_reload=False):
    """Retrieve the last questions of the feed.

    Returns [Question_1, Question_2, ...] where each question has keys:
      link: str
      title: str
      body: str (html stripped to plain text)
      tags: list of str
    """
    log(bold("Fetching question feed"))
    if force_reload:
        log(fg("Force reload", magenta))
    feed = spider.get_feed(url, force_reload=force_reload)
    if feed.status == 304:  # Not Modified
        log(fg("Feed not modified since last retrieval (status 304)", magenta))
        return []
    log("Number of entries in feed: {}", fg(len(feed.entries), green))
    questions = []
    for entry in feed.entries:
        soup = BeautifulSoup(entry.summary, "html.parser")
        q = {
            "link": entry.link,
            "title": entry.title,
            "body": soup.getText(" ", strip=True),
            "tags": [x["term"] for x in entry.tags],
        }
        questions.append(q)
    return questions


def get_user_tags(filename):
    """Parse the tags file and return the user followed and ignored tags."""
    try:
        with open(filename, "r") as fh:
            bs = BeautifulSoup(fh.read(), "html.parser")
        return {
            "followed": [
                x.getText(" ", strip=True)
                for x in bs.find(id="watching-1").find_all("a", class_="post-tag")
            ],
            "ignored": [
                x.getText(" ", strip=True)
                for x in bs.find(id="ignored-1").find_all("a", class_="post-tag")
            ],
        }
    except FileNotFoundError:
        abort("File not found: {}", filename)
/tools/log.py
"""Log Tool for Answerable

This file contains the functions used to log control data and debug
messages in a unified format.
"""

import re
import sys
import inspect

from tools.displayer import bold, red, magenta, fg

_logs = []  # list of file handlers

# ANSI escape sequences.  Raw string: the original non-raw "\\033\[" relied
# on Python keeping the invalid "\[" escape (a DeprecationWarning).
_ansire = re.compile(r"\033\[[^m]+m")


def _strip_ansi(msg):
    """Strip ansi escape sequences"""
    return _ansire.sub("", msg)


def _get_caller():
    # Module name two frames up: the caller of the public log function.
    frm = inspect.stack()[2]
    return inspect.getmodule(frm[0]).__name__


def add_stderr():
    """Add the stderr to the log file handlers"""
    _logs.append(sys.stderr)


def add_log(logfile):
    """Open a new file and add it to the log file handlers"""
    _logs.append(open(logfile, "w"))


def close_logs():
    """Close all log file handlers."""
    for f in _logs:
        if f is not sys.stderr:
            f.close()


def advice_message():
    """Returns the advice of where to find the full logs"""
    lognames = ", ".join([fh.name for fh in _logs if fh is not sys.stderr])
    return "Full log in " + lognames


def abort(msg, *argv):
    """Print an error message and abort execution"""
    if sys.stderr not in _logs:
        add_stderr()
    log(fg(msg, red), *argv, who=_get_caller())
    print_advice()
    close_logs()
    sys.exit()


def warn(msg, *argv):
    """Print a warning message on stderr without aborting execution"""
    err_off = sys.stderr not in _logs
    if err_off:
        add_stderr()
    log(fg(msg, magenta), *argv, who=_get_caller())
    # Only remove stderr if this call added it.  Previously the last
    # handler was popped unconditionally, which could silently drop a
    # real log file when stderr was already registered.
    if err_off:
        _logs.pop()


def print_advice():
    """Print where to find the full log if necessary"""
    if sys.stderr not in _logs:
        print(advice_message(), file=sys.stderr)


def log(msg, *argv, **kargs):
    """Print to logs a formatted message

    Accepts an optional who= keyword; otherwise the caller's module name
    is used.  Files get the ANSI-stripped text; stderr keeps the colors.
    """
    who = kargs["who"] if "who" in kargs else _get_caller()
    who = f"[{who}] "
    textf = who + _strip_ansi(msg.format(*argv))
    texts = bold(who) + msg.format(*argv)
    for f in _logs:
        if f is sys.stderr:
            print(texts, file=f)
            sys.stderr.flush()
        else:
            print(textf, file=f)
/tools/spider.py
"""Spider Tool for Answerable

This file contains the functions used to wrap requests following
respectful practices, taking into account robots.txt, conditional GETs,
caching content, etc.
"""

import json
from time import sleep
from datetime import timedelta as td
from urllib.robotparser import RobotFileParser
from urllib.parse import urlparse

import requests
import feedparser

from tools import cache
from tools.displayer import fg, bold, green, yellow, red
from tools.log import log, abort

_rp = {}  # robots.txt memory


class _FalseResponse:
    """Object with the required fields to simulate a HTTP response"""

    def __init__(self, code, content):
        self.status_code = code
        self.content = content


def ask_robots(url: str, useragent: str) -> bool:
    """Check if the useragent is allowed to scrap an url

    Parse the robots.txt file, induced from the url, and check if the
    useragent may fetch a specific url.  Parsers are memoized per host.
    """
    url_struct = urlparse(url)
    base = url_struct.netloc
    if base not in _rp:
        _rp[base] = RobotFileParser()
        _rp[base].set_url(url_struct.scheme + "://" + base + "/robots.txt")
        _rp[base].read()
    return _rp[base].can_fetch(useragent, url)


def get(url, delay=2, use_cache=True, max_delta=td(hours=12)):
    """Respectful wrapper around requests.get

    - delay: seconds to sleep before hitting the network
    - use_cache: read/write the on-disk spider cache
    - max_delta: maximum age for an acceptable cached response
    """
    useragent = "Answerable v0.1"

    # If a cached answer exists and is acceptable, then return the cached one.
    cache_file = url.replace("/", "-")
    if use_cache:
        log("Checking cache before petition {}", fg(url, yellow))
        hit, path = cache.check("spider", cache_file, max_delta)
        if hit:
            with open(path, "r") as fh:
                res = fh.read().replace("\\r\\n", "")
            return _FalseResponse(200, res)

    # If the robots.txt doesn't allow the scraping, return forbidden status
    if not ask_robots(url, useragent):
        log(fg("robots.txt forbids {}", red), url)
        return _FalseResponse(403, "robots.txt forbids it")

    # Make the request after the specified delay
    log("Waiting to ask for {}", fg(url, yellow))
    log(" in {:4.2f} seconds", delay)
    sleep(delay)
    headers = {"User-Agent": useragent}
    log("Requesting")
    res = requests.get(url, timeout=10, headers=headers)

    # Exit the program if the scraping was penalized
    if res.status_code == 429:  # too many requests
        abort("Too many requests")

    # Cache the response if allowed by user.  res.encoding can be None
    # when the server sends no charset; fall back to utf-8 instead of
    # crashing on decode(None).
    if use_cache:
        cache.update(
            "spider",
            cache_file,
            res.content.decode(res.encoding or "utf-8"),
            json_format=False,
        )
    return res


def get_feed(url, force_reload=False):
    """Get RSS feed and optionally remember to reduce bandwidth"""
    useragent = "Answerable RSS v0.1"
    log("Requesting feed {}", fg(url, yellow))
    cache_file = url.replace("/", "_")

    # Get the conditions for the GET bandwidth reduction
    etag = None
    modified = None
    if not force_reload:
        hit, path = cache.check("spider.rss", cache_file, td(days=999))
        if hit:
            with open(path, "r") as fh:
                headers = json.load(fh)
            etag = headers["etag"]
            modified = headers["modified"]
            log("with {}: {}", bold("etag"), fg(etag, yellow))
            log("with {}: {}", bold("modified"), fg(modified, yellow))

    # Get the feed
    feed = feedparser.parse(url, agent=useragent, etag=etag, modified=modified)

    # Store the etag and/or modified headers
    # NOTE(review): feedparser results lack .status when the fetch itself
    # fails (e.g. network error) — confirm callers handle that case.
    if feed.status != 304:
        etag = feed.etag if "etag" in feed else None
        modified = feed.modified if "modified" in feed else None
        new_headers = {
            "etag": etag,
            "modified": modified,
        }
        cache.update("spider.rss", cache_file, new_headers)
        log("Stored new {}: {}", bold("etag"), fg(etag, green))
        log("Stored new {}: {}", bold("modified"), fg(modified, green))
    return feed
/tools/statistics.py
"""Statistics Tool for Answerable

This file contains the functions used to analyze user answers.
"""

#
# TAG RELATED METRICS (USING QA)
#

# Memoization for tags_info: the cached mapping plus the exact object it
# was computed from.  Keying on the source object fixes a stale-cache bug:
# previously the first result was returned for every subsequent call,
# even with different data.
_tags_info = None
_tags_info_src = None


def tags_info(qa):
    """Map each tag to its (score, acceptance, count) triple."""
    global _tags_info, _tags_info_src
    if _tags_info is not None and _tags_info_src is qa:
        return _tags_info
    info = {}
    for _, a in qa:
        for t in a["tags"]:
            score, accepted, count = info.get(t, (0, 0, 0))
            info[t] = (score + a["score"], accepted + a["is_accepted"], count + 1)
    _tags_info = info
    _tags_info_src = qa
    return info


def top_tags_use(qa, top=5):
    """Top tags by appearance"""
    tags = tags_info(qa)
    sorted_tags = sorted(tags, key=lambda x: tags[x][2], reverse=True)
    return [(x, tags[x][2]) for x in sorted_tags][:top]


def top_tags_score_abs(qa, top=5):
    """Top tags by accumulated score"""
    tags = tags_info(qa)
    sorted_tags = sorted(tags, key=lambda x: tags[x][0], reverse=True)
    return [(x, tags[x][0]) for x in sorted_tags][:top]


def top_tags_acceptance_abs(qa, top=5):
    """Top tags by accumulated acceptance"""
    tags = tags_info(qa)
    sorted_tags = sorted(tags, key=lambda x: tags[x][1], reverse=True)
    return [(x, tags[x][1]) for x in sorted_tags][:top]


def top_tags_score_rel(qa, top=5):
    """Top tags by score per answer"""
    tags = tags_info(qa)
    sorted_tags = sorted(tags, key=lambda x: tags[x][0] / tags[x][2], reverse=True)
    return [(x, tags[x][0] / tags[x][2]) for x in sorted_tags][:top]


def top_tags_acceptance_rel(qa, top=5):
    """Top tags by acceptance per answer"""
    tags = tags_info(qa)
    sorted_tags = sorted(tags, key=lambda x: tags[x][1] / tags[x][2], reverse=True)
    return [(x, tags[x][1] / tags[x][2]) for x in sorted_tags][:top]


#
# ANSWER RELATED METRICS
#


def top_answers(answers, top=5):
    """Top answers by score"""
    return sorted(answers, key=lambda x: x["score"], reverse=True)[:top]


def top_accepted(answers, top=5):
    """Top accepted answers by score"""
    ranked = sorted(answers, key=lambda x: x["score"], reverse=True)
    return [a for a in ranked if a["is_accepted"]][:top]


#
# REPUTATION RELATED METRICS
#


def reputation(answer):
    """Reputation associated to an answer.

    NOT ACCURATE: per-vote breakdown and reputation caps are unknown here.
    """
    return answer["score"] * 10 + answer["is_accepted"] * 15


# Memoization for the reputation helpers, keyed by input object (see
# tags_info for the rationale).
_answers_sorted_reputation = None
_answers_sorted_src = None
_total_reputation = None
_total_reputation_src = None


def answers_sorted_reputation(answers):
    """Answers sorted by associated reputation"""
    global _answers_sorted_reputation, _answers_sorted_src
    if _answers_sorted_reputation is None or _answers_sorted_src is not answers:
        _answers_sorted_reputation = sorted(answers, key=reputation, reverse=True)
        _answers_sorted_src = answers
    return _answers_sorted_reputation


def total_reputation(answers):
    """Total reputation gained from answers"""
    global _total_reputation, _total_reputation_src
    if _total_reputation is None or _total_reputation_src is not answers:
        _total_reputation = sum(reputation(a) for a in answers)
        _total_reputation_src = answers
    return _total_reputation


def average_reputation_weight(answers, w):
    """Average reputation and weight of answers generating w % reputation

    Returns (average reputation of the top answers that together reach
    w of the total reputation, percentage of answers they represent).
    """
    repw = total_reputation(answers) * w
    sorted_answers = answers_sorted_reputation(answers)
    acc_rep = 0
    acc_ans = 0
    while acc_rep < repw and acc_ans < len(sorted_answers):
        acc_rep += reputation(sorted_answers[acc_ans])
        acc_ans += 1
    if acc_ans == 0:
        return (0, 0)
    return (acc_rep / acc_ans, 100 * acc_ans / len(answers))


#
# LISTS TO SIMPLIFY CALLING
#

tag_metrics = [  # call with qa
    ("Top used tags", top_tags_use),
    ("Top tags by accumulated score", top_tags_score_abs),
    ("Top tags by score per answer", top_tags_score_rel),
    ("Top tags by accumulated acceptance", top_tags_acceptance_abs),
    ("Top tags by acceptance per answer", top_tags_acceptance_rel),
]

answer_metrics_single = [  # call with answers
    ("Answers analyzed", len),
    ("Total score", lambda x: sum([a["score"] for a in x])),
    ("Average score", lambda x: sum([a["score"] for a in x]) / len(x)),
    ("Total accepted", lambda x: sum([a["is_accepted"] for a in x])),
    ("Acceptance ratio", lambda x: sum([a["is_accepted"] for a in x]) / len(x)),
]

answer_metrics_tops = [  # call with answers
    ("Top answers by score", top_answers, lambda a: a["score"]),
    ("Top accepted answers by score", top_accepted, lambda a: a["score"]),
]

reputation_metrics_single = [  # call with answers
    ("Total reputation", lambda x: sum([reputation(a) for a in x])),
    ("Average reputation", lambda x: sum([reputation(a) for a in x]) / len(x)),
]

reputation_weight_metrics = (  # call with answers and weights
    [0.95, 0.80],
    average_reputation_weight,
    (
        "Average reputation on answers generating {:.0f}% reputation",
        "Percentage of answers generating {:.0f}% reputation",
    ),
)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
nairita87/Ocean_dir
refs/heads/ocean_coastal
{"/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_parameters.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/parameters.py", "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/wind_model.py"], "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_velocity_grid.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/parameters.py", "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/velocities.py", "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/profile_model/radialprofiles.py", "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/structures/geogrid.py"], "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_velocities.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/velocities.py"], "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/parameters.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/wind_model.py"], "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/utils/test_gis.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/utils/gis.py"], "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/hurricane/test_hurricane.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/hurricane_model/hurricane.py"], "/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/structures/test_geogrid.py": ["/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/structures/geogrid.py"]}
└── └── testing_and_setup └── compass └── ocean ├── baroclinic_channel │ └── 4km │ └── rpe_test │ └── plot.py ├── drying_slope │ └── zstar_variableCd │ └── 1km │ └── analysis │ └── comparison.py ├── global_ocean │ ├── ARM60to6 │ │ └── init │ │ └── define_base_mesh.py │ ├── CA120to3 │ │ └── build_mesh │ │ └── define_base_mesh.py │ ├── HI120to12 │ │ └── build_mesh │ │ └── define_base_mesh.py │ ├── SO60to10wISC │ │ └── init │ │ └── define_base_mesh.py │ └── scripts │ └── copy_cell_indices_ISC.py ├── hurricane │ ├── hurricane_wind_pressure │ │ ├── ad_hoc │ │ │ ├── simple_vector_example.py │ │ │ └── wind_vector_example.py │ │ ├── hurricane_model │ │ │ └── hurricane.py │ │ ├── main.py │ │ ├── plot_winds_on_mpaso_mesh.py │ │ ├── profile_model │ │ │ └── radialprofiles.py │ │ ├── structures │ │ │ └── geogrid.py │ │ ├── tests │ │ │ ├── hurricane │ │ │ │ └── test_hurricane.py │ │ │ ├── structures │ │ │ │ └── test_geogrid.py │ │ │ ├── utils │ │ │ │ └── test_gis.py │ │ │ └── winds │ │ │ ├── test_parameters.py │ │ │ ├── test_velocities.py │ │ │ └── test_velocity_grid.py │ │ ├── utils │ │ │ ├── gis.py │ │ │ └── math.py │ │ ├── winds │ │ │ ├── parameters.py │ │ │ ├── velocities.py │ │ │ └── wind_model.py │ │ └── winds_io │ │ ├── import_data.py │ │ └── output_data.py │ └── scripts │ ├── interpolate_time_varying_forcing.py │ ├── spinup_time_varying_forcing.py │ └── write_forcing_file.py ├── internal_waves │ └── 5km │ └── rpe_test │ └── plot.py ├── jigsaw_to_MPAS │ └── build_mesh.py ├── lock_exchange │ └── 0.5km │ └── rpe_test │ └── plot.py ├── overflow │ └── 1km │ └── rpe_test │ └── plot.py └── surface_waves └── analysis └── comparison.py
/testing_and_setup/compass/ocean/baroclinic_channel/4km/rpe_test/plot.py
# Plot temperature sections of the 4 km baroclinic channel RPE test:
# one panel per viscosity value, read from output_1.nc .. output_5.nc.
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib
# NOTE(review): the backend is selected after pyplot is imported —
# confirm 'Agg' actually takes effect in this import order.
matplotlib.use('Agg')
fig = plt.gcf()
nRow = 1  # 2
nCol = 5
nu = ['1', '5', '10', '100', '200']  # viscosity labels, one per column
iTime = [0]   # time index to read per row
time = ['20']  # day label per row
# ---nx,ny for 10 km
#nx = 16
#ny = 50
# ---nx,ny for 4 km
nx = 40
ny = 126
# ---nx,ny for 1 km
#nx = 160
#ny = 500
fig, axs = plt.subplots(nRow, nCol, figsize=(
    2.1 * nCol, 5.0 * nRow), constrained_layout=True)
for iCol in range(nCol):
    for iRow in range(nRow):
        ncfile = Dataset('output_' + str(iCol + 1) + '.nc', 'r')
        var = ncfile.variables['temperature']
        # Surface layer (k=0) reshaped to the structured (ny, nx) grid.
        var1 = np.reshape(var[iTime[iRow], :, 0], [ny, nx])
        # --- flip in y-dir
        var = np.flipud(var1)
        # --- Every other row in y needs to average two neighbors in x on
        # planar hex mesh.  var_avg aliases var (no copy); this is safe here
        # because iteration i writes index i and only reads i and i+1, which
        # have not been overwritten yet.
        var_avg = var
        for j in range(0, ny, 2):
            for i in range(0, nx - 2):
                var_avg[j, i] = (var[j, i + 1] + var[j, i]) / 2.0
        # axs is 1-D when nRow == 1, 2-D otherwise.
        if nRow == 1:
            ax = axs[iCol]
        if nRow > 1:
            ax = axs[iRow, iCol]
        dis = ax.imshow(
            var_avg,
            extent=[
                0,
                160,
                0,
                500],
            cmap='jet',
            vmin=11.8,
            vmax=13.0)
        ax.set_title("day " + time[iRow] + ", " + r"$\nu_h=$" + nu[iCol])
        ax.set_xticks(np.arange(0, 161, step=40))
        ax.set_yticks(np.arange(0, 501, step=50))
        if iRow == nRow - 1:
            ax.set_xlabel('x, km')
        if iCol == 0:
            ax.set_ylabel('y, km')
        # One shared colorbar on the last column of each row.
        if iCol == nCol - 1:
            if nRow == 1:
                fig.colorbar(dis, ax=axs[nCol - 1], aspect=40)
            if nRow > 1:
                fig.colorbar(dis, ax=axs[iRow, nCol - 1], aspect=40)
        ncfile.close()
# Map the hard-coded grid size back to its resolution label.
if nx == 16:
    res = '10'
if nx == 40:
    res = '4'
if nx == 160:
    res = '1'
plt.savefig("sections_baroclinic_channel_" + res + "km.png")
/testing_and_setup/compass/ocean/drying_slope/zstar_variableCd/1km/analysis/comparison.py
../comparison.py
/testing_and_setup/compass/ocean/global_ocean/ARM60to6/init/define_base_mesh.py
# /usr/bin/env python """ % Create cell width array for this mesh on a regular latitude-longitude grid. % Outputs: % cellWidth - m x n array, entries are desired cell width in km % lat - latitude, vector of length m, with entries between -90 and 90, degrees % lon - longitude, vector of length n, with entries between -180 and 180, degrees """ import numpy as np import jigsaw_to_MPAS.mesh_definition_tools as mdt def cellWidthVsLatLon(): lat = np.arange(-90, 90.01, 0.1) lon = np.arange(-180, 180.01, 0.1) QU1 = np.ones(lat.size) EC60to30 = mdt.EC_CellWidthVsLat(lat) RRS30to6 = mdt.RRS_CellWidthVsLat(lat, 30, 6) AtlNH = RRS30to6 AtlGrid = mdt.mergeCellWidthVsLat(lat, EC60to30, AtlNH, 0, 4) PacNH = mdt.mergeCellWidthVsLat(lat, 30 * QU1, RRS30to6, 50, 12) PacGrid = mdt.mergeCellWidthVsLat(lat, EC60to30, PacNH, 0, 6) cellWidth = mdt.AtlanticPacificGrid(lat, lon, AtlGrid, PacGrid) import matplotlib.pyplot as plt import matplotlib matplotlib.use('Agg') plt.clf() plt.plot(lat, AtlGrid, label='Atlantic') plt.plot(lat, PacGrid, label='Pacific') plt.grid(True) plt.xlabel('latitude') plt.title('Grid cell size, km') plt.legend() plt.savefig('cellWidthVsLat.png') return cellWidth, lon, lat
/testing_and_setup/compass/ocean/global_ocean/CA120to3/build_mesh/define_base_mesh.py
#!/usr/bin/env python ''' name: define_base_mesh authors: Phillip J. Wolfram This function specifies the resolution for a coastal refined mesh for the CA coast from SF to LA for Chris Jeffrey and Mark Galassi. It contains the following resolution resgions: 1) a QU 120km global background resolution 2) 3km refinement region along the CA coast from SF to LA, with 30km transition region ''' import numpy as np import jigsaw_to_MPAS.coastal_tools as ct def cellWidthVsLatLon(): km = 1000.0 params = ct.default_params SFtoLA = {"include": [np.array([-124.0, -117.5, 34.2, 38.0])], # SF to LA "exclude": [np.array([-122.1, -120.8, 37.7, 39.2])]} # SF Bay Delta WestCoast = np.array([-136.0, -102.0, 22.0, 51]) print("****QU120 background mesh and 300m refinement from SF to LA****") params["mesh_type"] = "QU" params["dx_max_global"] = 120.0 * km params["region_box"] = SFtoLA params["plot_box"] = WestCoast params["dx_min_coastal"] = 3.0 * km params["trans_width"] = 100.0 * km params["trans_start"] = 30.0 * km cell_width, lon, lat = ct.coastal_refined_mesh(params) return cell_width / 1000, lon, lat
/testing_and_setup/compass/ocean/global_ocean/HI120to12/build_mesh/define_base_mesh.py
#!/usr/bin/env python
'''
name: define_base_mesh
authors: Phillip J. Wolfram

This function specifies a high resolution patch for Chris Jeffery.
'''
import numpy as np


def cellWidthVsLatLon():
    """Return (cellWidth, lon, lat) with a 12 km patch centered near Hawaii.

    The width blends linearly from the patch resolution at (lonC, latC)
    up to the background resolution at an angular distance of rad degrees,
    and stays at the background value beyond that.
    """
    lat = np.arange(-90, 90.01, 1.0)
    lon = np.arange(-180, 180.01, 2.0)

    base_res = 120.0  # background resolution, km
    high_res = 12.0   # patch resolution, km
    lat_center = 20.0
    lon_center = -155.0
    radius = 10.0     # patch radius, degrees

    # Angular distance from the patch center, normalized and clipped to 1.
    d_lat = (lat - lat_center) * (lat - lat_center)
    d_lon = (lon - lon_center) * (lon - lon_center)
    distance = np.sqrt(d_lat[:, np.newaxis] + d_lon[np.newaxis, :])
    theta = np.minimum(distance / radius, 1.0)

    # theta = 0 at the center (high_res), theta = 1 far away (base_res).
    cellWidth = (base_res * theta + (1.0 - theta) * high_res) * np.ones(
        (lon.size, lat.size)
    )
    return cellWidth, lon, lat
/testing_and_setup/compass/ocean/global_ocean/SO60to10wISC/init/define_base_mesh.py
import numpy as np
import jigsaw_to_MPAS.mesh_definition_tools as mdt
from jigsaw_to_MPAS.coastal_tools import signed_distance_from_geojson, \
    compute_cell_width
from geometric_features import read_feature_collection
import xarray

# Uncomment to plot the cell size distribution.
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt


def cellWidthVsLatLon():
    """
    Create cell width array for this mesh on a regular latitude-longitude grid.

    Returns
    -------
       cellWidth : numpy.ndarray
            m x n array, entries are desired cell width in km
       lat : numpy.ndarray
            latitude, vector of length m, with entries between -90 and 90,
            degrees
       lon : numpy.ndarray
            longitude, vector of length n, with entries between -180 and 180,
            degrees
    """
    # 0.1-degree background grid.
    dlon = 0.1
    dlat = dlon
    nlon = int(360. / dlon) + 1
    nlat = int(180. / dlat) + 1
    lon = np.linspace(-180., 180., nlon)
    lat = np.linspace(-90., 90., nlat)

    # 30 km south of the Equator, EC profile to the north.
    cellWidthSouth = 30. * np.ones((len(lat)))

    # Transition at Equator
    cellWidthNorth = mdt.EC_CellWidthVsLat(lat)
    latTransition = 0.0
    latWidthTransition = 5.0
    cellWidthVsLat = mdt.mergeCellWidthVsLat(
        lat,
        cellWidthSouth,
        cellWidthNorth,
        latTransition,
        latWidthTransition)

    # Broadcast the 1-D latitude profile to the full 2-D grid.
    _, cellWidth = np.meshgrid(lon, cellWidthVsLat)

    # now, add the high-res region
    fc = read_feature_collection('high_res_region.geojson')

    signed_distance = signed_distance_from_geojson(fc, lon, lat,
                                                   max_length=0.25)

    # Save the distance field for inspection/debugging.
    da = xarray.DataArray(signed_distance,
                          dims=['y', 'x'],
                          coords={'y': lat, 'x': lon},
                          name='signed_distance')
    cw_filename = 'signed_distance.nc'
    da.to_netcdf(cw_filename)

    # multiply by 5 because transition_width gets multiplied by 0.2 in
    # compute_cell_width
    # Equivalent to 10 degrees latitude
    trans_width = 5 * 1100e3

    # The last term compensates for the offset in compute_cell_width.
    # The middle of the transition is ~2.5 degrees (300 km) south of the
    # region boundary to best match previous transition at 48 S. (The mean lat
    # of the boundary is 45.5 S.)
    trans_start = -300e3 - 0.5 * trans_width

    dx_min = 10.

    cellWidth = compute_cell_width(signed_distance, cellWidth, lon, lat,
                                   dx_min, trans_start, trans_width,
                                   restrict_box={'include': [], 'exclude': []})

    # Uncomment to plot the cell size distribution.
    # Lon, Lat = np.meshgrid(lon, lat)
    # ax = plt.subplot(111)
    # plt.pcolormesh(Lon, Lat, cellWidth)
    # plt.colorbar()
    # ax.set_aspect('equal')
    # ax.autoscale(tight=True)
    # plt.tight_layout()
    # plt.savefig('cellWidthVsLat.png', dpi=200)

    return cellWidth, lon, lat
/testing_and_setup/compass/ocean/global_ocean/scripts/copy_cell_indices_ISC.py
#!/usr/bin/env python
'''
Script to map cell indices from MPASO noLI mesh to those of the wLI mesh
in the runoff mapping file.

Start by building a runoff mapping file that has all the mesh description
from wLI mapping file but the actual mapping from the noLI mapping file:

ncks -x -v S,col,row /project/projectdirs/acme/inputdata/cpl/cpl6/map_rx1_to_oEC60to30v3wLI_smoothed.r300e600.170328.nc newfile.nc
ncks -A -v S,col,row /project/projectdirs/acme/inputdata/cpl/cpl6/map_rx1_to_oEC60to30v3_smoothed.r300e600.161222.nc newfile.nc
'''

# import modules # {{{
import netCDF4
import numpy as np
import argparse
import shutil
# }}}

# parser # {{{
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', dest='input_file',
                    default='map_rx1_to_oEC60to30v3wLI.nc',
                    help='Input file, original runoff mapping file'
                    )
parser.add_argument('-o', '--output_file', dest='output_file',
                    default='map_rx1_to_oEC60to30v3wLI_final.nc',
                    help='Output file, revised runoff mapping file with no runoff below ice shelf cavities'
                    )
parser.add_argument('-l', '--lookup_table_file', dest='lookup_table_file',
                    default='lookup_table.txt',
                    help='lookup table file, only used locally'
                    )
parser.add_argument('-w', '--mesh_with_ISC', dest='mesh_with_ISC',
                    default='culled_mesh.nc',
                    help='mesh file, including ice shelf cavities'
                    )
parser.add_argument('-n', '--mesh_no_ISC', dest='mesh_no_ISC',
                    default='no_ISC_culled_mesh.nc',
                    help='mesh file, but without ice shelf cavities'
                    )
input_file = parser.parse_args().input_file
output_file = parser.parse_args().output_file
lookup_table_file = parser.parse_args().lookup_table_file
# The output starts as a byte copy of the input; indices are rewritten below.
shutil.copy2(input_file, output_file)
# }}}

# Toggle by hand: True rebuilds the lookup table (slow); False reloads the
# previously saved table from lookup_table_file.
build_table = True

if build_table:
    # noLI mesh
    mesh_no_ISC = netCDF4.Dataset(parser.parse_args().mesh_no_ISC, 'r')
    noLIxCell = mesh_no_ISC.variables['xCell'][:]
    noLIyCell = mesh_no_ISC.variables['yCell'][:]
    noLInCells = len(mesh_no_ISC.dimensions['nCells'])

    # wLI mesh
    mesh_with_ISC = netCDF4.Dataset(parser.parse_args().mesh_with_ISC, 'r')
    wLIxCell = mesh_with_ISC.variables['xCell'][:]
    wLIyCell = mesh_with_ISC.variables['yCell'][:]

    # init lookup table
    lookup = np.zeros((noLInCells,), dtype=np.uint32)

    print("nCells=", noLInCells)
    # O(noLInCells * wLInCells) nearest-neighbor search by squared distance.
    for i in range(noLInCells):
        # for i in range(30):
        if i % 1000 == 0:
            print("Cell: ", i)
        # find index of wLI mesh that is the same location as each cell in the
        # noLI mesh
        lookup[i] = np.argmin((noLIxCell[i] - wLIxCell[:]) ** 2 +
                              (noLIyCell[i] - wLIyCell[:])**2)
    mesh_no_ISC.close()
    mesh_with_ISC.close()
    print("Lookup table complete.")
    np.savetxt(lookup_table_file, lookup, fmt='%d')
    print("Saved to ", lookup_table_file)
else:
    lookup = np.loadtxt(lookup_table_file, dtype=np.uint32)
    print("Loaded lookup table from:", lookup_table_file)
    print("Lookup: first entries:", lookup[0:10])
    print("Lookup: last entries:", lookup[-10:])

# now swap in wLI indices into the runoff mapping file
f = netCDF4.Dataset(output_file, "r+")
row = f.variables['row'][:]
rownew = row * 0
# 'row' is 1-based in the mapping file; the lookup table is 0-based.
for i in range(len(row)):
    rownew[i] = lookup[row[i] - 1] + 1  # 1-based
f.variables['row'][:] = rownew[:]
f.close()
print("Copied over indices.")

# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/ad_hoc/simple_vector_example.py
import numpy as np
import matplotlib.pyplot as plt


def example():
    """Draw streamlines of a simple constant 2x2 vector field."""
    x, y = np.linspace(-1, 1, 2), np.linspace(-1, 1, 2)
    # Same field as before, written as array literals:
    # A (x-component) and B (y-component) on the 2x2 grid.
    A = np.array([[1.0, 1.0],
                  [-1.0, -1.0]])
    B = np.array([[-1.0, 1.0],
                  [-1.0, 1.0]])

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot the streamlines.
    ax.streamplot(x, y, A, B)

    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_aspect('equal')
    plt.show()


if __name__ == '__main__':
    example()
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/ad_hoc/wind_vector_example.py
import sys
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib.patches import Circle
import math


def W(x, y):
    """Return the wind vector [ve, vn] at the point (x, y)."""
    r = np.sqrt(x*x+y*y)
    if r <= 0:
        return [0, 0]
    # Tangential wind: rotate the radial unit vector by 90 degrees.
    speed = V(r)
    return [-(y/r)*speed, (x/r)*speed]


def V(r):
    """Radial wind-speed profile."""
    return 2*r*r*np.exp(-r)


def example(n):
    """Plot tangential wind streamlines on an n-by-n grid."""
    # Grid of x, y points
    nx, ny = n, n
    x = np.linspace(-2, 2, nx)
    y = np.linspace(-2, 2, ny)

    # Wind field vector components U, V on the grid.
    U, V_comp = np.zeros((ny, nx)), np.zeros((ny, nx))
    for j in range(ny):
        for i in range(nx):
            ve, vn = W(x[i], y[j])
            U[j, i] = ve
            V_comp[j, i] = vn

    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)

    # Plot the streamlines, colored by wind speed.
    ax1.streamplot(x, y, U, V_comp, color=np.sqrt(U*U+V_comp*V_comp),
                   cmap='Spectral')

    ax1.set_xlabel('$x$')
    ax1.set_ylabel('$y$')
    ax1.set_xlim(-2, 2)
    ax1.set_ylim(-2, 2)
    ax1.set_aspect('equal')
    plt.title('Tangential Wind Vectors')
    plt.show()


if __name__ == '__main__':
    example(8)
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/hurricane_model/hurricane.py
import datetime


class Hurricane:
    """One point of a hurricane trajectory with its storm parameters."""

    def __init__(self, center: tuple, extent: float, pcentral: float,
                 deltap: float, vmax: float, b: float, time: float,
                 initial_datetime: datetime.datetime):
        # Position of the eye (lon, lat) in radians as tuple.
        self.center = center
        # The maximum extent of the hurricane in kilometers.
        self.extent = extent
        # Forward velocity [ve, vn] in km/hr; filled in later via set_vf().
        self.vforward = []
        # Central pressure in millibars.
        self.pcentral = pcentral
        # Pressure difference in millibars.
        self.deltap = deltap
        # The maximum gradient wind speed in km/hr.
        self.vmax = vmax
        # The Holland parameter, conventionally in the range [0.5, 2.5].
        self.b = b
        # Time of this trajectory point in hours.
        self.time = time
        # Reference datetime that self.time is measured from.
        self.ref_time = initial_datetime

    def set_vf(self, vf: tuple):
        """Set the forward velocity (ve, vn) in km/hr."""
        self.vforward = vf
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/main.py
from winds_io import import_data
from winds_io import output_data
from structures import geogrid
import sys
import numpy as np
from winds import parameters
from winds import wind_model


def sim_hurricane():
    """Top-level driver: read inputs, build the grid and trajectory,
    compute the wind/pressure fields, and write them to NetCDF."""
    # Read in the input file to check which grid we are using
    print('Import user inputs')
    traj_filename, grid_flag, grid_filename, ambient_pressure, holland_b_param = \
        import_data.read_input_file('hurricane_inputs.txt')

    # Read grid-specific parameters and create grid
    print('Read-in grid')
    grid = import_data.initialize_grid(grid_filename, grid_flag)

    # Read hurricane trajectory and set hurricane parameters
    print('Initialize hurricane trajectory data')
    curr_hurricane = import_data.initialize_hurricane(traj_filename,
                                                      ambient_pressure,
                                                      holland_b_param)

    # Define parameters
    print('Define parameters')
    params = define_params(curr_hurricane)

    # Compute winds on grid
    print('Compute winds')
    winds = compute_winds(curr_hurricane, params, grid)

    # Output results
    print('Output results')
    output_data.write_netcdf('out.nc', curr_hurricane, grid, winds)


def compute_winds(curr_hurricane, params, grid: geogrid):
    """Evaluate the wind model for each trajectory snapshot.

    NOTE: only len - 1 snapshots are produced; the last trajectory point
    has no forward-velocity estimate.
    """
    ntimes = len(curr_hurricane) - 1
    mywinds = []
    for it in range(0, ntimes):
        print('Time iteration %d / %d' % (it + 1, len(curr_hurricane) - 1))
        mywinds.append(wind_model.WindModel(params, curr_hurricane[it], grid))
    return mywinds


def define_params(curr_hurricane):
    """Build the Parameters object from the trajectory's mean latitude."""
    lat = []
    for i in range(0, len(curr_hurricane)):
        lat.append(curr_hurricane[i].center[1])
    return parameters.Parameters(np.mean(lat))


if __name__ == "__main__":
    # FIX: removed a stale block of commented-out driver code that no longer
    # matched the current import_data API, and corrected the typo in the
    # completion message ('succesfully' -> 'successfully').
    sim_hurricane()
    print('Program executed successfully')
    sys.exit(0)
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/plot_winds_on_mpaso_mesh.py
# Author: Steven Brus
# Date: April, 2020
# Description: Plots synthetic wind/pressure timeseries on MPAS-O mesh

import netCDF4
import matplotlib.pyplot as plt
import numpy as np
import os
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
plt.switch_backend('agg')
cartopy.config['pre_existing_data_dir'] = \
    os.getenv('CARTOPY_DIR', cartopy.config.get('pre_existing_data_dir'))

#######################################################################
#######################################################################


def plot_data(lon_grid, lat_grid, data, var_label, var_abrev, time, snap=0):
    """Plot one snapshot of cell data on a global map and save it as a PNG.

    :param snap: snapshot index used to number the output PNG file.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
    levels = np.linspace(np.amin(data), np.amax(data), 100)
    cf = ax1.tricontourf(lon_grid, lat_grid, data, levels=levels,
                         transform=ccrs.PlateCarree())
    ax1.set_extent([0, 359.9, -90, 90], crs=ccrs.PlateCarree())
    ax1.add_feature(cfeature.LAND, zorder=100)
    ax1.add_feature(cfeature.LAKES, alpha=0.5, zorder=101)
    ax1.add_feature(cfeature.COASTLINE, zorder=101)
    ax1.set_title('interpolated data ' + time.strip())
    cbar = fig.colorbar(cf, ax=ax1)
    cbar.set_label(var_label)

    # Save figure
    # FIX: the frame number was previously read from a global 'i' that leaked
    # out of the caller's loop; it is now passed in explicitly. Also fixed
    # the misspelled savefig keyword 'box_inches' -> 'bbox_inches'.
    fig.tight_layout()
    fig.savefig(var_abrev + '_' + str(snap).zfill(4) + '.png',
                bbox_inches='tight')
    plt.close()

#######################################################################
#######################################################################


if __name__ == '__main__':

    grid_file = 'mesh.nc'
    data_file = 'out.nc'

    grid_nc = netCDF4.Dataset(grid_file, 'r')
    lon_grid = grid_nc.variables['lonCell'][:] * 180.0 / np.pi
    lat_grid = grid_nc.variables['latCell'][:] * 180.0 / np.pi

    data_nc = netCDF4.Dataset(data_file, 'r')
    u_data = data_nc.variables['windSpeedU'][:]
    v_data = data_nc.variables['windSpeedV'][:]
    p_data = data_nc.variables['atmosPressure'][:]
    xtime = data_nc.variables['xtime'][:]

    for i in range(u_data.shape[0] - 1):
        print('Plotting vel: ' + str(i))
        data = np.sqrt(np.square(u_data[i, :]) + np.square(v_data[i, :]))
        time_ls = [x.decode("utf-8") for x in xtime[i]]
        time = ''.join(time_ls)
        plot_data(lon_grid, lat_grid, data,
                  'velocity magnitude', 'vel', time, i)
        plot_data(lon_grid, lat_grid, p_data[i, :],
                  'atmospheric pressure', 'pres', time, i)
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/profile_model/radialprofiles.py
import numpy as np
import math


class RadialProfile():
    """A radially symmetric profile sampled on n evenly spaced radii in
    [0, extent]."""

    def __init__(self, n, extent):
        self.profile = np.zeros(n, dtype=np.float64)  # sampled values
        self.rvals = np.zeros(n, dtype=np.float64)    # sample radii
        self.n = n
        self.extent = extent
        self.dr = extent / (n - 1)  # radial sample spacing
        for i in range(0, n):
            self.rvals[i] = i * self.dr

    def getValue(self, r):
        """Return the sampled profile value at radius r (0.0 outside
        [0, extent]); nearest-lower sample, no interpolation."""
        if r < 0 or r > self.extent:
            return 0.0
        else:
            k = int(r / self.dr)
            # BUG FIX: previously returned self.rvals[k] (i.e. the radius
            # itself) instead of the stored profile value.
            return self.profile[k]


class PressureProfile(RadialProfile):
    """Base class for radial surface-pressure profiles."""

    def __init__(self, n, extent, pcentral, deltap, rmax):
        super().__init__(n, extent)
        self.pcentral = pcentral  # central pressure
        self.deltap = deltap      # pressure deficit
        self.rmax = rmax          # radius of maximum wind


class HollandPressureProfile(PressureProfile):
    """Holland pressure profile: p(r) = pc + dp * exp(-(rmax/r)^b)."""

    def __init__(self, n, extent, pcentral, deltap, rmax, b):
        super().__init__(n, extent, pcentral, deltap, rmax)
        self.b = b
        for i in range(0, self.n):
            r = self.rvals[i]
            if r > 0:
                p = self.pcentral + self.deltap * math.exp(-pow(self.rmax / r, b))
            else:
                # At the eye (r == 0) the exponential term vanishes.
                p = pcentral
            self.profile[i] = p


class WindSpeedProfile(RadialProfile):
    """Base class for radial wind-speed profiles."""

    def __init__(self, n, extent, rmax):
        super().__init__(n, extent)
        self.rmax = rmax
        self.vmax = 0  # lazily computed by getVmax()

    def getVmax(self):
        """Return (and cache) the maximum sampled wind speed."""
        if self.vmax == 0:
            for i in range(0, self.n):
                self.vmax = max(self.vmax, self.profile[i])
        return self.vmax


class HollandWindSpeedProfile(WindSpeedProfile):
    """Holland gradient wind-speed profile, stored in km/hr."""

    def __init__(self, n, extent, rmax, deltap, rho, f, b, coriolis=False):
        super().__init__(n, extent, rmax)
        # To convert the leading term to m/s. This factor comes from adopting
        # millibars instead of Pascals, and km/hr instead of m/s.
        self.units_factor = 100
        self.deltap = deltap
        self.rho = rho
        self.f = f  # Coriolis parameter
        self.b = b
        for i in range(0, self.n):
            r = self.rvals[i]
            if r > 0:
                y = pow(rmax / r, b)
                exp_term = self.units_factor * (deltap / rho) * b * y * math.exp(-y)
                if coriolis == True:
                    v = math.sqrt(exp_term + 0.25 * pow(r, 2) * pow(f, 2)) + 0.5 * r * f
                else:
                    v = math.sqrt(exp_term)
            else:
                v = 0.0
            self.profile[i] = v * 3.6  # to convert to km/h
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/structures/geogrid.py
import numpy as np


class GeoGrid:
    """An unstructured geographic grid described by per-cell lon/lat arrays.

    FIX: removed a large block of stale commented-out code implementing a
    legacy raster-style API (nlon/nlat/cellsize, put/getByIndex, ...) that
    no longer matches this class and misleads readers and tests.
    """

    def __init__(self, lon: np.ndarray, lat: np.ndarray):
        """
        Constructor.
        :param lon: longitude of the grid in radians, as numpy array
        :param lat: latitude of the grid in radians, as numpy array
        """
        self.lon = lon
        self.lat = lat
        self.ncells = len(lon)
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/hurricane/test_hurricane.py
import datetime
from hurricane_model.hurricane import Hurricane


def test_hurricane():
    """Exercise the Hurricane constructor and set_vf.

    FIX: the old test used an obsolete API (2-argument constructor plus
    setVForward/setPCentral/... mutators) that the current Hurricane class
    does not provide; it now matches the 8-argument constructor and set_vf.
    """
    center = (1.0, 2.0)      # Position of the eye (lon, lat) in radians.
    extent = 100.0           # The maximum extent of the hurricane in kilometers.
    vforward = (3.0, 4.0)    # Forward velocity [ve, vn] in km/hr.
    pcentral = 200.0         # Central pressure in millibars.
    deltap = 50.0            # Pressure difference in millibars.
    vmax = 15.0              # The maximum gradient wind speed in km/hr.
    b = 1.2                  # The Holland parameter, conventionally in [0.5, 2.5].
    time = 6.0               # Hours since the reference time.
    ref = datetime.datetime(2000, 1, 1)

    hurricane = Hurricane(center, extent, pcentral, deltap, vmax, b, time, ref)
    hurricane.set_vf(vforward)

    assert hurricane.center == center
    assert hurricane.extent == extent
    assert hurricane.vforward == vforward
    assert hurricane.pcentral == pcentral
    assert hurricane.deltap == deltap
    assert hurricane.vmax == vmax
    assert hurricane.b == b
    assert hurricane.time == time
    assert hurricane.ref_time == ref
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/structures/test_geogrid.py
from structures.geogrid import GeoGrid

# NOTE(review): this test exercises a legacy raster-style GeoGrid API
# (origin lon/lat, nlon/nlat, cellsize, put/getByIndex/getByCoordinate/...)
# that survives only as commented-out code in structures/geogrid.py. The
# current GeoGrid takes two per-cell lon/lat arrays, so this test will fail
# with a TypeError at construction -- confirm which API is intended.


def test_geogrid():
    # Grid definition: lower-left origin, cell counts, and cell size.
    lon = -106.0
    lat = 35
    nlon = 8
    nlat = 4
    cellsize = 1.0
    defaultValue = -1.0
    grid = GeoGrid(lon,lat,nlon,nlat,cellsize,defaultValue = defaultValue)
    assert grid.lon == lon
    assert grid.lat == lat
    assert grid.nlon == nlon
    assert grid.nlat == nlat
    assert grid.cellsize == cellsize
    # NOTE(review): this assertion is a tautology (compares the local
    # variable to itself); presumably grid.defaultValue was intended.
    assert defaultValue == defaultValue

    # Fill the four quadrants of the grid with distinct values.
    l = int(nlat/2)
    k = int(nlon/2)
    for j in range(0,l):
        for i in range(0,k):
            grid.put(i,j,1.0)
        for i in range(k,nlon):
            grid.put(i,j,2.0)
    for j in range(l,nlat):
        for i in range(0,k):
            grid.put(i,j,3.0)
        for i in range(k,nlon):
            grid.put(i,j,4.0)

    # Read the quadrants back.
    for j in range(0,l):
        for i in range(0,k):
            assert grid.getByIndex(i,j) == 1.0
        for i in range(k,nlon):
            assert grid.getByIndex(i,j) == 2.0
    for j in range(l,nlat):
        for i in range(0,k):
            assert grid.getByIndex(i,j) == 3.0
        for i in range(k,nlon):
            assert grid.getByIndex(i,j) == 4.0

    # Cell-center and index round trips.
    testcell = [3,3]
    center = grid.getCenter(testcell[0],testcell[1])
    centerx = lon + (testcell[0]+0.5)*cellsize
    centery = lat + (testcell[1]+0.5)*cellsize
    assert center[0] == centerx
    assert center[1] == centery
    index = grid.getIndex(centerx,centery)
    assert index[0] == testcell[0]
    assert index[1] == testcell[1]

    # Index lookup and coordinate lookup must agree.
    value = grid.getByIndex(testcell[0],testcell[1])
    testcoords = grid.getCenter(testcell[0],testcell[1])
    valuec = grid.getByCoordinate(testcoords[0],testcoords[1])
    assert value == valuec

    # Origin and bounding box.
    origin = grid.getOrigin()
    assert origin[0] == lon
    assert origin[1] == lat
    bounds = grid.bounds
    assert bounds[0] == lon
    assert bounds[1] == lon + nlon*cellsize
    assert bounds[2] == lat
    assert bounds[3] == lat + nlat*cellsize

    # Index containment checks on each boundary.
    assert grid.indexInside(-1,l) == False
    assert grid.indexInside(k,l) == True
    assert grid.indexInside(nlon,l) == False
    assert grid.indexInside(k,-1) == False
    assert grid.indexInside(k,l) == True
    assert grid.indexInside(k,nlat) == False

    # Coordinate containment checks around each corner.
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[2]+cellsize) == True
    assert grid.coordinateInside(bounds[0]-cellsize,bounds[2]+cellsize) == False
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[2]-cellsize) == False
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[2]+cellsize) == True
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[2]-cellsize) == False
    assert grid.coordinateInside(bounds[1]+cellsize,bounds[2]+cellsize) == False
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[3]-cellsize) == True
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[3]+cellsize) == False
    assert grid.coordinateInside(bounds[0]-cellsize,bounds[3]+cellsize) == False
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[3]-cellsize) == True
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[3]+cellsize) == False
    assert grid.coordinateInside(bounds[1]+cellsize,bounds[3]-cellsize) == False

    # clear() must zero every cell.
    grid.clear()
    for j in range(0,nlat):
        for i in range(0,nlon):
            assert grid.getByIndex(i,j) == 0.0
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/utils/test_gis.py
from geopy.distance import geodesic
from utils.gis import geodistkm


def test_gis():
    """geodistkm (lon, lat order) must agree with geopy's geodesic (lat, lon)."""
    albuquerque = [35.0844, -106.6504]  # (lat, lon)
    los_alamos = [35.8800, -106.3031]   # (lat, lon)
    expected = geodesic(albuquerque, los_alamos).km
    actual = geodistkm(albuquerque[1], albuquerque[0],
                       los_alamos[1], los_alamos[0])
    assert expected == actual
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_parameters.py
from winds.wind_model import PROFILE_TYPE
from winds.parameters import Parameters
import math


def test_parameters():
    """Check the units and Coriolis parameter of the current Parameters API.

    FIX: the old test built Parameters(gridsize, nr, wind_profile_type),
    an obsolete signature, and contained no assertions; the current class
    takes a mean latitude (radians) and an optional profile type.
    """
    mean_lat = math.radians(35.0)
    params = Parameters(mean_lat)

    # Default profile model and unit conventions.
    assert params.wind_profile_type == PROFILE_TYPE.HOLLAND
    assert params.get_pressure_unit() == 'millibars'
    assert params.get_distance_unit() == 'kilometers'
    assert params.get_time_unit() == 'hours'

    # The Coriolis parameter is 2*omega*sin(|lat|), with omega converted
    # from rad/hr to rad/s (divide by 3600).
    siderealDay = 23.934  # A sidereal day in hrs.
    omega = 2.0 * math.pi / siderealDay  # The Earth's rotation rate in rad/hr.
    expected = 2.0 * omega / 3600. * math.sin(math.fabs(mean_lat))
    assert math.isclose(params.get_coriolis(mean_lat), expected)
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_velocities.py
from winds.velocities import Velocities
import math


def test_velocities():
    """Write wind vectors around the unit circle to a CSV for inspection.

    FIX: the Velocities constructor now requires the maximum gradient wind
    speed as a third argument; the old 2-argument call raised a TypeError.
    """
    # Forward velocity in km/hr.
    vfe = -1.0  # Eastward.
    vfn = 0.0   # Northward.
    vg = 1.0    # Tangential gradient wind speed in km/hr.
    vmax = 1.0  # Maximum gradient wind speed in km/hr.
    veloc = Velocities(vfe, vfn, vmax)
    r = 1.0  # Unit circle about the origin.
    np = 360
    dtheta = 2*math.pi/np
    with open('test_velocities_out.csv','wt') as out:
        out.write('x,y,vx,vy,r,theta_degrees\n')
        for i in range(0,np):
            theta = i*dtheta
            degrees = 180.0*theta/math.pi
            x = r*math.cos(theta)
            y = r*math.sin(theta)
            v = veloc.compute_wind_vector(vg,x,y)
            out.write(str(x)+','+str(y)+','+str(v[0])+','+str(v[1])+','+str(r)+','+str(degrees)+'\n')
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_velocity_grid.py
import numpy as np
from structures.geogrid import GeoGrid
from profile_model.radialprofiles import HollandWindSpeedProfile
from winds.parameters import Parameters
from winds.velocities import Velocities
import matplotlib.pyplot as plt

# NOTE(review): this test depends on APIs that no longer exist in the
# current sources: GeoGrid(cmin, cmin, n, n, cellsize) is the legacy raster
# constructor (now commented out), Parameters() requires a mean latitude
# argument, and Parameters has no getCoriolisMid() method. Confirm the
# intended APIs before relying on this test.


def test_velocity_grid():
    # Grid of x, y points
    n = 50          # grid cells per side
    nr = 200        # radial profile samples
    rmax = 40       # radius of maximum wind, km
    cmin, cmax = -200 , 200  # grid extent, km
    cellsize = (cmax-cmin)/n
    x = np.linspace(cmin, cmax, n)
    y = np.linspace(cmin, cmax, n)
    # Eastward/northward wind component grids.
    U = GeoGrid(cmin,cmin,n,n,cellsize)
    V = GeoGrid(cmin,cmin,n,n,cellsize)
    params = Parameters()
    b = 1.4         # Holland parameter
    hc = [0,0]      # hurricane center
    vf = [0,10]     # forward velocity [ve, vn], km/hr
    deltap = 100    # pressure deficit, mbar
    coriol = False
    profile = HollandWindSpeedProfile(nr,2*cmax,rmax,deltap,params.rho,params.getCoriolisMid(),b,coriolis=coriol)
    vels = Velocities(vf[0],vf[1],profile.getVmax())
    # Sample the wind vector at every cell center.
    for j in range(0,n):
        for i in range(0,n):
            pt = U.getCenter(i,j)
            r = np.sqrt(pow(pt[0]-hc[0],2)+pow(pt[1]-hc[1],2))
            vg = profile.getValue(r)
            vv = vels.compute_wind_vector(vg,pt[0],pt[1])
            U.put(i,j,vv[0])
            V.put(i,j,vv[1])
    assert True # If we made it to here.
    fig = plt.figure()
    ax = fig.add_subplot(131)
    ax.plot(profile.rvals, profile.profile)
    ax.set(xlabel='r (km)', ylabel='wind speed (km/hr)', title='Radial Wind')
    ax1 = fig.add_subplot(133)
    # Plot the streamlines.
    # Matplotlib assume an ordinary row ordering, so the rows must be reversed before plotting.
    Ug = U.grid
    Vg = V.grid
    Uplt = np.zeros([n,n])
    Vplt = np.zeros([n,n])
    for j in range(0,n):
        jp = n-j-1
        for i in range(0,n):
            Uplt[jp,i]=Ug[j,i]
            Vplt[jp,i]=Vg[j,i]
    Vmag = np.sqrt(Ug*Ug+Vg*Vg)
    ax1.streamplot(x, y, Uplt, Vplt, color=Vmag, cmap='Spectral')
    ax1.set_xlabel('$x$')
    ax1.set_ylabel('$y$')
    ax1.set_xlim(cmin,cmax)
    ax1.set_ylim(cmin,cmax)
    ax1.set_aspect('equal')
    plt.title('Wind Vectors')
    plt.show()
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/utils/gis.py
from geopy.distance import geodesic


def geodistkm(x1, y1, x2, y2):
    '''
    Returns the geodesic distance in km given two pairs of (lon, lat) coordinates.

    Note: Because it uses geopy, the coordinate order is reversed to
    (lat, lon) before calling the geopy function.

    :param x1: lon of the first point.
    :param y1: lat of the first point.
    :param x2: lon of the second point.
    :param y2: lat of the second point.
    :return: Geodesic distance between the two points in km.
    '''
    start = (y1, x1)
    end = (y2, x2)
    return geodesic(start, end).km
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/utils/math.py
def sign(x):
    """Return 1 for non-negative x, otherwise -1 (note: sign(0) == 1)."""
    return 1 if x >= 0 else -1
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/parameters.py
import math
from winds.wind_model import PROFILE_TYPE


class Parameters:
    """Physical constants and unit conventions for the wind model.

    Units are km, hr, and millibars for distance, wind, and pressure
    respectively.
    """

    def __init__(self, mean_lat: float, wind_profile_type=PROFILE_TYPE.HOLLAND):
        """
        Constructor.
        :param mean_lat: mean latitude of the hurricane trajectory in radians
        """
        # A sidereal day in hrs.
        self.siderealDay = 23.934
        # The Earth's rotation rate in rad/hr.
        self.omega = 2.0 * math.pi / self.siderealDay
        # Air density at sea level in kg/m^3.
        self.rho = 1.15
        # The particular wind profile model being used.
        self.wind_profile_type = wind_profile_type
        # Earth radius in km.
        self.earth_radius = 6371.1

    def get_coriolis(self, lat: float) -> float:
        """
        Returns the Coriolis parameter for a given latitude.
        :param lat: in radians
        :return: coriolis factor in rad/s to be consistent with Holland's
            model units (omega is divided by 3600 to convert rad/hr -> rad/s)
        """
        return 2.0 * self.omega / 3600. * math.sin(math.fabs(lat))

    def get_pressure_unit(self):
        """Pressure unit convention used throughout the model."""
        return 'millibars'

    def get_distance_unit(self):
        """Distance unit convention used throughout the model."""
        return 'kilometers'

    def get_time_unit(self):
        """Time unit convention used throughout the model."""
        return 'hours'
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/velocities.py
import math


class Velocities:
    """Builds Earth-frame wind vectors from the tangential gradient wind and
    the hurricane's forward motion."""

    def __init__(self, vfe, vfn, vmax):
        """
        Initialize with the forward velocity components.
        :param vfe: Eastward forward velocity (x-component in the Earth frame) in km/hr.
        :param vfn: Northward forward velocity (y-component in the Earth frame) in km/hr.
        :param vmax: Maximum gradient wind speed in km/hr, used to scale the
            forward-motion contribution in compute_wind_vector.
        """
        self.vf = []
        self.vfmagn = []
        self.xunitv = []
        self.yunitv = []
        self.set_vforward(vfe, vfn)
        self.vmax = vmax

    def set_vforward(self, vfe, vfn):
        """Set the forward velocity and the derived local-frame unit vectors."""
        self.vf = [vfe, vfn]
        self.vfmagn = math.sqrt(pow(vfe, 2) + pow(vfn, 2))
        if self.vfmagn > 0.0:
            # Local frame: y axis along the forward motion, x axis to its right.
            self.xunitv = [vfn / self.vfmagn, -vfe / self.vfmagn]
            self.yunitv = [vfe / self.vfmagn, vfn / self.vfmagn]
        else:
            # BUG FIX: a stationary hurricane (zero forward speed) used to
            # raise ZeroDivisionError here; fall back to the east/north frame.
            self.xunitv = [1.0, 0.0]
            self.yunitv = [0.0, 1.0]

    def compute_wind_vector(self, vg, xe, yn):
        """
        Returns the velocity components [ve,vn] given the tangential gradient wind speed.
        :param vg: The tangential (theta) gradient wind speed in the hurricane frame in km/hr.
        :param xe: The eastern component of position relative to the local origin (the hurricane eye) in km.
        :param yn: The northern component of position relative to the local origin (the hurricane eye) in km.
        :return: [ve,vn] the eastward and northward components of the wind velocity in the Earth frame in km/hr.
        """
        rmagn = math.sqrt(xe*xe + yn*yn)
        if rmagn == 0.0:
            # BUG FIX: the azimuth is undefined at the eye and this used to
            # divide by zero; the tangential wind vanishes there.
            return [0.0, 0.0]
        # Azimuth of the point in the local frame.
        costheta = (xe*self.xunitv[0] + yn*self.xunitv[1])/rmagn
        sintheta = -(xe*self.xunitv[1] - yn*self.xunitv[0])/rmagn
        # Unit vector in the tangential (theta) direction, Earth frame.
        theta_unitv = [-sintheta*self.xunitv[0]+costheta*self.yunitv[0],
                       -sintheta*self.xunitv[1]+costheta*self.yunitv[1]]
        vgtheta = [theta_unitv[0]*vg, theta_unitv[1]*vg]
        # Forward-motion contribution scaled by the local/maximum wind ratio.
        vfcorr = vg/self.vmax
        ve = self.vf[0]*vfcorr + vgtheta[0]
        vn = self.vf[1]*vfcorr + vgtheta[1]
        return [ve, vn]
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/wind_model.py
from enum import Enum
import numpy as np
import winds.parameters as Parameters
import hurricane_model as Hurricane
import structures as Geogrid
import matplotlib.pyplot as plt
import math


class PROFILE_TYPE(Enum):
    """Supported radial wind/pressure profile models."""
    HOLLAND = 'holland'
    WILLOUGHBY = 'willoughby'


class WindModel:
    """Pressure and wind fields on a grid for one hurricane snapshot."""

    def __init__(self, params: Parameters, curr_hurricane: Hurricane, grid: Geogrid):
        self.profile_type = params.wind_profile_type
        if self.profile_type == PROFILE_TYPE.HOLLAND:
            # Distance between the hurricane eye and the grid points:
            # great circle (haversine) distance in km.
            r = np.power(np.sin((grid.lat - curr_hurricane.center[1]) * 0.5), 2) + \
                np.cos(grid.lat) * np.cos(curr_hurricane.center[1]) * \
                np.power(np.sin((grid.lon - curr_hurricane.center[0]) * 0.5), 2)
            r = 2.0 * params.earth_radius * np.arcsin(np.sqrt(r))
            # Compute pressure
            self.pressure_profile = holland_pressure_profile(curr_hurricane, r)
            # Compute wind speed
            self.wind_speed_profile = holland_windspeed_profile(params, curr_hurricane, r)
            # Compute wind components
            self.u, self.v = compute_components(self.wind_speed_profile, curr_hurricane, grid)
        else:
            # BUG FIX: raising a plain string ("raise 'msg'") is itself a
            # TypeError in Python 3; raise a proper exception instead.
            raise NotImplementedError(
                'Profile models other than Holland are not currently supported.')


def holland_pressure_profile(hurricane: Hurricane, r: np.ndarray):
    """Holland radial surface-pressure profile in millibars.

    :param hurricane: class type Hurricane
    :param r: distance between the eye of the hurricane and the grid points in km
    """
    return hurricane.pcentral + hurricane.deltap * \
        np.exp(-np.power(hurricane.extent / r, hurricane.b))


def holland_windspeed_profile(params: Parameters, hurricane: Hurricane,
                              r: np.ndarray, coriolis=False):
    """Holland gradient wind-speed profile, returned in km/hr.

    :param params: class parameters
    :param hurricane: class Hurricane
    :param r: distance between the eye of the hurricane and the grid points in km
    :param coriolis: include Coriolis terms if True.
        NOTE(review): this branch reads params.f, which the current
        Parameters class does not define (it exposes get_coriolis(lat));
        confirm before enabling coriolis=True.
    """
    # Holland's equation assumes deltap in Pa and density in kg/m3 and
    # returns m/s; units_factor converts deltap from mbar to Pascals.
    units_factor = 100.
    y = np.power(hurricane.extent / r, hurricane.b)
    exp_term = units_factor * (hurricane.deltap / params.rho) * hurricane.b * y * np.exp(-y)
    if coriolis is True:
        v = np.sqrt(exp_term + 0.25 * np.power(r * params.f, 2)) + 0.5 * r * params.f
    else:
        v = np.sqrt(exp_term)
    v *= 3.6  # Conversion from m/s to km/h
    return v


def compute_components(wind_speed_profile, curr_hurricane: Hurricane,
                       grid: Geogrid) -> (np.ndarray, np.ndarray):
    """Decompose the tangential wind speed into eastward/northward components
    and add the scaled forward-motion contribution."""
    # Azimuth of each grid point about the eye, rotated 90 degrees to obtain
    # the tangential direction.
    theta = np.arctan2(grid.lat - curr_hurricane.center[1],
                       grid.lon - curr_hurricane.center[0])
    theta += math.pi * 0.5
    vg_x = wind_speed_profile * np.cos(theta)
    vg_y = wind_speed_profile * np.sin(theta)
    # Compute total velocity: forward velocity scaled by the ratio of the
    # local wind speed to the maximum gradient wind speed.
    ratio = wind_speed_profile / curr_hurricane.vmax
    u = vg_x + curr_hurricane.vforward[0] * ratio
    v = vg_y + curr_hurricane.vforward[1] * ratio
    return u, v
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds_io/import_data.py
import json
from netCDF4 import Dataset
import numpy as np
import math
from hurricane_model import hurricane
from structures import geogrid
import datetime


def read_grid_file(grid_filename: str, grid_flag: int) -> (float, float):
    """Return (lon, lat) arrays in radians. grid_flag == 1 selects a regular
    raster grid description; any other value reads a NetCDF (MPAS) mesh."""
    if grid_flag == 1:
        xll, yll, cellsize, numcells_lat, numcells_lon = read_raster_inputs(grid_filename)
        lon, lat = setup_regular_grid(xll, yll, cellsize, numcells_lat, numcells_lon)
    else:
        lon, lat = read_netcdf(grid_filename)
    return lon, lat


def read_input_file(filename: str) -> (str, int, str, float, float):
    """Parse the 5-line run-control file: trajectory filename, grid flag,
    grid filename, ambient pressure (mbar), and Holland b parameter.
    The line order is fixed; only the first token of numeric lines is used."""
    try:
        f = open(filename, "r")
    except FileNotFoundError as fnf_error:
        raise fnf_error
    traj_filename = f.readline().rstrip('\n')
    grid_flag = f.readline().rstrip('\n').split()
    grid_flag = int(grid_flag[0])
    grid_filename = f.readline().rstrip('\n')
    ambient_pressure = f.readline().rstrip('\n').split()
    ambient_pressure = float(ambient_pressure[0])
    holland_b_param = f.readline().rstrip('\n').split()
    holland_b_param = float(holland_b_param[0])
    f.close()
    return traj_filename, grid_flag, grid_filename, ambient_pressure, holland_b_param


def setup_regular_grid(xll: float, yll: float, cellsize: float,
                       numcells_lat: int, numcells_lon: int) -> (float, float):
    """Build flattened cell-center lon/lat arrays (radians) for a regular
    grid whose lower-left corner is (xll, yll) in decimal degrees."""
    npoints = numcells_lat * numcells_lon
    lon = np.zeros((npoints, ))
    lat = np.zeros((npoints, ))
    k = 0
    # Cell centers: offset by half a cell from the lower-left corner;
    # column-major order (longitude varies slowest).
    for i in range(0, numcells_lon):
        for j in range(0, numcells_lat):
            lon[k] = xll + (float(i) + 0.5) * cellsize
            lat[k] = yll + (float(j) + 0.5) * cellsize
            k += 1
    lat = lat * math.pi / 180.  # Convert to radians
    lon = lon * math.pi / 180.  # Convert to radians
    return lon, lat


def read_raster_inputs(filename: str) -> (float, float, float, int, int):
    """Parse the 5-line raster grid description file (fixed line order)."""
    try:
        f = open(filename, "r")
    except FileNotFoundError as fnf_error:
        raise fnf_error
    # longitude of the south west corner in deg
    temp = f.readline().rstrip('\n').split()
    xll = float(temp[0])
    # latitude of the south west corner in deg
    temp = f.readline().rstrip('\n').split()
    yll = float(temp[0])
    # cell size in deg
    temp = f.readline().rstrip('\n').split()
    cellsize = float(temp[0])
    # number of cells for latitude
    temp = f.readline().rstrip('\n').split()
    numcells_lat = int(temp[0])
    # number of cells for longitude
    temp = f.readline().rstrip('\n').split()
    numcells_lon = int(temp[0])
    f.close()
    return xll, yll, cellsize, numcells_lat, numcells_lon


def read_json(filename: str):
    """Load and return the parsed contents of a JSON file."""
    try:
        with open(filename) as json_data:
            json_raw = json.load(json_data)
            return json_raw
    except FileNotFoundError as fnf_error:
        raise fnf_error


def read_netcdf(filename: str) -> (float, float):
    """Read cell-center lon/lat from an MPAS mesh file and wrap longitudes
    into (-pi, pi]."""
    # http://unidata.github.io/netcdf4-python/#section1
    # lat and lon from the netCDF file are assumed in radians
    try:
        nc = Dataset(filename)
        temp_lat = nc.variables['latCell'][:]
        temp_lon = nc.variables['lonCell'][:]
        # Convert to numpy array for subsequent processing
        lat = np.array(temp_lat)
        lon = np.array(temp_lon) - 2. * math.pi
        # Wrap any longitude that fell at or below -pi back into range.
        for i in range(0, len(lon)):
            if lon[i] <= -math.pi:
                lon[i] += 2. * math.pi
        return lon, lat
    except FileNotFoundError as fnf_error:
        raise fnf_error


def initialize_hurricane(traj_filename: str, ambient_pressure: float,
                         holland_b_param: float) -> list:
    """Build the list of Hurricane trajectory points from a JSON track file,
    converting units, and derive forward velocities between successive
    points. The last point gets no forward velocity."""
    # JSON Specs
    # "timeUnits": "hours",
    # "distanceUnits": "miles",
    # "windspeedUnits": "knots",
    # "pressureUnits": "mb",
    json_raw = read_json(traj_filename)
    ref_date = datetime.datetime.strptime(json_raw['initialTime'],'%Y-%m-%d_%H:%M:%S')
    curr_hurricane = []
    traj = json_raw['stormTrack']['features']
    for it in range(0, len(traj)):
        coord = traj[it]['geometry']['coordinates']
        center_coord = [x * math.pi / 180. for x in coord]  # degree to rad
        extent = traj[it]['properties']['rMax'] * 1.60934  # miles to km
        pmin = traj[it]['properties']['minP']  # in mbar
        deltap = ambient_pressure - pmin  # in mbar
        time = traj[it]['properties']['time']  # in hrs
        vmax = traj[it]['properties']['wMax'] * 1.852  # from knots to km/h
        curr_hurricane.append(hurricane.Hurricane(tuple(center_coord), extent,
                                                  pmin, deltap, vmax,
                                                  holland_b_param, time,
                                                  ref_date))
    # Compute the components of the forward velocity
    # (direction from each point toward the next one).
    for it in range(0, len(traj) - 1):
        x1 = curr_hurricane[it].center[0]
        y1 = curr_hurricane[it].center[1]
        x2 = curr_hurricane[it + 1].center[0]
        y2 = curr_hurricane[it + 1].center[1]
        theta = math.atan2(y2 - y1, x2 - x1)
        vf = traj[it]['properties']['vf'] * 1.852  # knots to km/h
        curr_hurricane[it].set_vf((vf * math.cos(theta), vf * math.sin(theta)))
    return curr_hurricane


def initialize_grid(grid_filename: str, grid_flag: int) -> geogrid.GeoGrid:
    """Read the grid coordinates and wrap them in a GeoGrid."""
    lon, lat = read_grid_file(grid_filename, grid_flag)
    return geogrid.GeoGrid(lon, lat)
/testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds_io/output_data.py
import netCDF4 import numpy as np import hurricane_model as Hurricane import structures as Geogrid import winds_io as WindModel import matplotlib.pyplot as plt import datetime def write_netcdf(filename: str, curr_hurricane: Hurricane, grid: Geogrid, winds: WindModel): # http://unidata.github.io/netcdf4-python/#section1 rootgrp = netCDF4.Dataset(filename, "w", format="NETCDF3_64BIT_OFFSET") # Declare dimensions rootgrp.createDimension('nCells',grid.ncells) rootgrp.createDimension('StrLen',64) rootgrp.createDimension('Time',None) # Declare variables time = rootgrp.dimensions['Time'].name ncells = rootgrp.dimensions['nCells'].name time_var = rootgrp.createVariable('xtime','S1',('Time','StrLen')) u_var = rootgrp.createVariable('windSpeedU',np.float64,(time,ncells)) v_var = rootgrp.createVariable('windSpeedV',np.float64,(time,ncells)) pres_var = rootgrp.createVariable('atmosPressure',np.float64,(time,ncells)) # Format time ref_date = curr_hurricane[0].ref_time xtime = [] for it in range(0,len(curr_hurricane)): t = curr_hurricane[it].time date = ref_date + datetime.timedelta(hours=np.float64(t)) xtime.append(date.strftime('%Y-%m-%d_%H:%M:%S'+45*' ')) xtime = np.asarray(xtime) xtime_list = [] for t in xtime: xtime_list.append(list(t)) time_var[:] = xtime_list # Assign variables kmh_to_mps = 0.277778 mbar_to_pa = 100.0 for it in range(0, len(curr_hurricane)-1): u_var[it, :] = winds[it].u * kmh_to_mps v_var[it, :] = winds[it].v * kmh_to_mps pres_var[it, :] = winds[it].pressure_profile * mbar_to_pa # Close rootgrp.close()
/testing_and_setup/compass/ocean/hurricane/scripts/interpolate_time_varying_forcing.py
# Author: Steven Brus
# Date: August, 2019
# Description: Interpolates CFSR atmospheric reanalysis data onto the MPAS-O mesh and
#              creates an input file to support time varying atmospheric forcing in the model

import netCDF4
import matplotlib.pyplot as plt
import numpy as np
import glob
import pprint
import datetime
import os
import yaml
import subprocess
import argparse
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from scipy import interpolate
import write_forcing_file
plt.switch_backend('agg')
cartopy.config['pre_existing_data_dir'] = \
    os.getenv('CARTOPY_DIR', cartopy.config.get('pre_existing_data_dir'))
##################################################################################################
##################################################################################################

def interpolate_data_to_grid(grid_file,data_file,var):
  """Interpolate variable *var* from a CFSR lat/lon file onto the MPAS mesh.

  Returns (lon_grid, lat_grid, interp_data, lon_data, lat_data, data, xtime):
  the mesh cell coordinates in degrees, the interpolated snapshots
  (nsnaps, nCells), the source grid and source data, and 64-char timestamps.
  """

  # Open files
  data_nc = netCDF4.Dataset(data_file,'r')
  grid_nc = netCDF4.Dataset(grid_file,'r')

  # Get grid from data file.
  # A duplicate column is appended at 360 deg so interpolation wraps the
  # longitude seam; latitudes are flipped to be ascending for the
  # RegularGridInterpolator.
  lon_data = data_nc.variables['lon'][:]
  lon_data = np.append(lon_data,360.0)
  lat_data = np.flipud(data_nc.variables['lat'][:])
  time = data_nc.variables['time'][:]
  nsnaps = time.size
  nlon = lon_data.size
  nlat = lat_data.size
  data = np.zeros((nsnaps,nlat,nlon))
  print(data.shape)

  # Get grid from grid file (MPAS stores cell centres in radians).
  lon_grid = grid_nc.variables['lonCell'][:]*180.0/np.pi
  lat_grid = grid_nc.variables['latCell'][:]*180.0/np.pi
  grid_points = np.column_stack((lon_grid,lat_grid))
  ncells = lon_grid.size
  interp_data = np.zeros((nsnaps,ncells))
  print(interp_data.shape)
  print(np.amin(lon_grid),np.amax(lon_grid))
  print(np.amin(lat_grid),np.amax(lat_grid))

  # Interpolate timesnaps
  for i,t in enumerate(time):
    print('Interpolating '+var+': '+str(i))

    # Get data to interpolate; last column repeats the first to close the
    # 0/360 deg seam.
    data[i,:,0:-1] = np.flipud(data_nc.variables[var][i,:,:])
    data[i,:,-1] = data[i,:,0]

    # Interpolate data onto new grid (points outside the source grid get 0.0)
    interpolator = interpolate.RegularGridInterpolator((lon_data,lat_data),data[i,:,:].T,bounds_error=False,fill_value=0.0)
    interp_data[i,:] = interpolator(grid_points)

  # Deal with time: recover the reference date from the units attribute,
  # e.g. "hours since 1990-01-01 00:00:00.0 +0:00".
  ref_date = data_nc.variables['time'].getncattr('units').replace('hours since ','').replace('.0 +0:00','')
  ref_date = datetime.datetime.strptime(ref_date,'%Y-%m-%d %H:%M:%S')
  xtime = []
  for t in time:
    date = ref_date + datetime.timedelta(hours=np.float64(t))
    # 19-char timestamp padded with 45 spaces to fill a 64-char slot.
    xtime.append(date.strftime('%Y-%m-%d_%H:%M:%S'+45*' '))
  xtime = np.array(xtime,'S64')

  return lon_grid,lat_grid,interp_data,lon_data,lat_data,data,xtime

##################################################################################################
##################################################################################################

def plot_interp_data(lon_data,lat_data,data,lon_grid,lat_grid,interp_data,var_label,var_abrev,time):
  """Plot the source field and its mesh interpolation, one panel each.

  NOTE(review): the savefig call below uses the global loop index ``i``
  from the __main__ block, not a parameter of this function — confirm
  before reusing this function outside this script.
  """

  # Plot data on the source lat/lon grid.
  fig = plt.figure()
  levels = np.linspace(np.amin(data),np.amax(data),100)
  ax0 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree())
  cf = ax0.contourf(lon_data, lat_data, data, levels=levels,
                    transform=ccrs.PlateCarree())
  ax0.set_extent([0, 359.9, -90, 90], crs=ccrs.PlateCarree())
  ax0.add_feature(cfeature.LAND, zorder=100)
  ax0.add_feature(cfeature.LAKES, alpha=0.5, zorder=101)
  ax0.add_feature(cfeature.COASTLINE, zorder=101)
  ax0.set_title('data '+time.strip().decode())
  cbar = fig.colorbar(cf,ax=ax0)
  cbar.set_label(var_label)

  # Plot interpolated data on the unstructured mesh points.
  ax1 = fig.add_subplot(2, 1, 2, projection=ccrs.PlateCarree())
  levels = np.linspace(np.amin(interp_data),np.amax(interp_data),100)
  cf = ax1.tricontourf(lon_grid,lat_grid,interp_data,levels=levels,
                       transform=ccrs.PlateCarree())
  ax1.set_extent([0, 359.9, -90, 90], crs=ccrs.PlateCarree())
  ax1.add_feature(cfeature.LAND, zorder=100)
  ax1.add_feature(cfeature.LAKES, alpha=0.5, zorder=101)
  ax1.add_feature(cfeature.COASTLINE, zorder=101)
  ax1.set_title('interpolated data '+time.strip().decode())
  cbar = fig.colorbar(cf,ax=ax1)
  cbar.set_label(var_label)

  # Save figure
  fig.tight_layout()
  # NOTE(review): 'box_inches' looks like a typo for 'bbox_inches', in
  # which case the keyword is not applied — verify intended behavior.
  fig.savefig(var_abrev+'_'+str(i).zfill(4)+'.png',box_inches='tight')
  plt.close()

##################################################################################################
##################################################################################################

if __name__ == '__main__':

  parser = argparse.ArgumentParser()
  parser.add_argument('--plot',action='store_true')
  args = parser.parse_args()
  # Plot every nplot-th snapshot when --plot is given.
  nplot = 10

  # Files to interpolate to/from
  grid_file = './mesh.nc'
  wind_file = './wnd10m.nc'
  pres_file = './prmsl.nc'
  forcing_file = 'atmospheric_forcing.nc'

  # Interpolation of u and v velocities
  lon_grid,lat_grid,u_interp,lon_data,lat_data,u_data,xtime = interpolate_data_to_grid(grid_file,wind_file,'U_GRD_L103')
  lon_grid,lat_grid,v_interp,lon_data,lat_data,v_data,xtime = interpolate_data_to_grid(grid_file,wind_file,'V_GRD_L103')

  # Calculate and plot velocity magnitude
  if args.plot:
    for i in range(u_data.shape[0]):
      if i % nplot == 0:
        print('Plotting vel: '+str(i))
        data = np.sqrt(np.square(u_data[i,:,:]) + np.square(v_data[i,:,:]))
        interp_data = np.sqrt(np.square(u_interp[i,:]) + np.square(v_interp[i,:]))
        plot_interp_data(lon_data,lat_data,data,lon_grid,lat_grid,interp_data,'velocity magnitude','vel',xtime[i])

  # Interpolation of atmospheric pressure
  lon_grid,lat_grid,p_interp,lon_data,lat_data,p_data,xtime = interpolate_data_to_grid(grid_file,pres_file,'PRMSL_L101')

  # Plot atmopheric pressure
  if args.plot:
    for i in range(p_data.shape[0]):
      if i % nplot == 0:
        print('Plotting pres: '+str(i))
        plot_interp_data(lon_data,lat_data,p_data[i,:,:],lon_grid,lat_grid,p_interp[i,:],'atmospheric pressure','pres',xtime[i])

  # Write to NetCDF file (remove any stale file first so variables are
  # created fresh rather than appended to an old run's output).
  subprocess.call(['rm',forcing_file])
  write_forcing_file.write_to_file(forcing_file,u_interp,'windSpeedU',xtime)
  write_forcing_file.write_to_file(forcing_file,v_interp,'windSpeedV',xtime)
  write_forcing_file.write_to_file(forcing_file,p_interp,'atmosPressure',xtime)
/testing_and_setup/compass/ocean/hurricane/scripts/spinup_time_varying_forcing.py
# Author: Steven Brus
# Date April, 2020
# Description:
# This creates a "dummy" time varying forcing file
# with zero wind zero atmospheric pressure perturbation
# for the tidal spinup run.
#
# The tidal spinup is run using this "dummy" atmospheric forcing
# because the time varying atmospheric forcing for the
# forward run requires information in the restart file.
# The inclusion of this additional information in the restart
# file is triggered by the use of time varying atmospheric forcing
# in the tidal spinup.

import netCDF4
import matplotlib.pyplot as plt
import numpy as np
import glob
import pprint
import datetime
import os
import yaml
import subprocess
import argparse
import write_forcing_file
plt.switch_backend('agg')

##################################################################################################
##################################################################################################

if __name__ == '__main__':

  parser = argparse.ArgumentParser()
  # --start_time: 'YYYY-MM-DD_HH:MM:SS'; --spinup_length: days (float).
  parser.add_argument('--start_time')
  parser.add_argument('--spinup_length')
  args = parser.parse_args()

  # Files to interpolate to/from
  grid_file = './mesh.nc'
  forcing_file = 'spinup_atmospheric_forcing.nc'

  # Setup timestamps
  # (3 time snaps are needed because new data will be read in at the end of the simulation)
  dtformat = '%Y-%m-%d_%H:%M:%S'
  start_time = datetime.datetime.strptime(args.start_time,dtformat)
  spinup_length = float(args.spinup_length)
  xtime = []
  # Timestamps are padded with 45 spaces to fill a fixed 64-char slot.
  xtime.append(args.start_time+45*' ')
  next_time = start_time + datetime.timedelta(days=spinup_length)
  xtime.append(datetime.datetime.strftime(next_time,dtformat)+45*' ')
  next_time = next_time + datetime.timedelta(days=spinup_length)
  xtime.append(datetime.datetime.strftime(next_time,dtformat)+45*' ')
  xtime = np.array(xtime,'S64')
  print(xtime)

  # Get grid from grid file (only the cell count is needed).
  grid_nc = netCDF4.Dataset(grid_file,'r')
  lon_grid = grid_nc.variables['lonCell'][:]
  ncells = lon_grid.size

  # Initialize atmospheric forcing fields:
  # zero wind, uniform standard atmospheric pressure (101325 Pa).
  u_data = np.zeros((3,ncells))
  v_data = np.zeros((3,ncells))
  p_data = np.zeros((3,ncells)) + 101325.0
  print(p_data.shape)

  # Write to NetCDF file (remove any stale file first).
  subprocess.call(['rm',forcing_file])
  write_forcing_file.write_to_file(forcing_file,u_data,'windSpeedU',xtime)
  write_forcing_file.write_to_file(forcing_file,v_data,'windSpeedV',xtime)
  write_forcing_file.write_to_file(forcing_file,p_data,'atmosPressure',xtime)
/testing_and_setup/compass/ocean/hurricane/scripts/write_forcing_file.py
# Author: Steven Brus # Date: April, 2020 # Description: This function writes time-varying forcing data to an input file for the model run. import os import numpy as np import netCDF4 ################################################################################################## ################################################################################################## def write_to_file(filename,data,var,xtime): if os.path.isfile(filename): data_nc = netCDF4.Dataset(filename,'a', format='NETCDF3_64BIT_OFFSET') else: data_nc = netCDF4.Dataset(filename,'w', format='NETCDF3_64BIT_OFFSET') # Find dimesions ncells = data.shape[1] nsnaps = data.shape[0] # Declare dimensions data_nc.createDimension('nCells',ncells) data_nc.createDimension('StrLen',64) data_nc.createDimension('Time',None) # Create time variable time = data_nc.createVariable('xtime','S1',('Time','StrLen')) time[:,:] = netCDF4.stringtochar(xtime) # Set variables data_var = data_nc.createVariable(var,np.float64,('Time','nCells')) data_var[:,:] = data[:,:] data_nc.close() ################################################################################################## ##################################################################################################
/testing_and_setup/compass/ocean/internal_waves/5km/rpe_test/plot.py
import numpy from netCDF4 import Dataset import matplotlib.pyplot as plt import matplotlib matplotlib.use('Agg') fig = plt.gcf() nRow = 4 nCol = 2 nu = ['0.01', '1', '15', '150'] iTime = [1, 2] time = ['day 10', 'day 20'] fig, axs = plt.subplots(nRow, nCol, figsize=( 4.0 * nCol, 3.7 * nRow), constrained_layout=True) for iRow in range(nRow): ncfile = Dataset('output_' + str(iRow + 1) + '.nc', 'r') var = ncfile.variables['temperature'] xtime = ncfile.variables['xtime'] for iCol in range(nCol): ax = axs[iRow, iCol] dis = ax.imshow(var[iTime[iCol], 0::4, :].T, extent=[ 0, 250, 500, 0], aspect='0.5', cmap='jet', vmin=10, vmax=20) if iRow == nRow - 1: ax.set_xlabel('x, km') if iCol == 0: ax.set_ylabel('depth, m') if iCol == nCol - 1: fig.colorbar(dis, ax=axs[iRow, iCol], aspect=10) ax.set_title(time[iCol] + ", " + r"$\nu_h=$" + nu[iRow]) ncfile.close() plt.savefig('sections_internal_waves.png')
/testing_and_setup/compass/ocean/jigsaw_to_MPAS/build_mesh.py
#!/usr/bin/env python """ This script performs the first step of initializing the global ocean. This includes: Step 1. Build cellWidth array as function of latitude and longitude Step 2. Build mesh using JIGSAW Step 3. Convert triangles from jigsaw format to netcdf Step 4. Convert from triangles to MPAS mesh Step 5. Create vtk file for visualization """ from __future__ import absolute_import, division, print_function, \ unicode_literals import subprocess import os import xarray import argparse import matplotlib.pyplot as plt from mpas_tools.conversion import convert from mpas_tools.io import write_netcdf from jigsaw_to_MPAS.jigsaw_driver import jigsaw_driver from jigsaw_to_MPAS.triangle_jigsaw_to_netcdf import jigsaw_to_netcdf from jigsaw_to_MPAS.inject_bathymetry import inject_bathymetry from jigsaw_to_MPAS.inject_meshDensity import inject_meshDensity from jigsaw_to_MPAS.inject_preserve_floodplain import \ inject_preserve_floodplain from define_base_mesh import define_base_mesh def build_mesh( preserve_floodplain=False, floodplain_elevation=20.0, do_inject_bathymetry=False, geometry='sphere', plot_cellWidth=True): if geometry == 'sphere': on_sphere = True else: on_sphere = False print('Step 1. 
Build cellWidth array as function of horizontal coordinates') if on_sphere: cellWidth, lon, lat = define_base_mesh.cellWidthVsLatLon() da = xarray.DataArray(cellWidth, dims=['lat', 'lon'], coords={'lat': lat, 'lon': lon}, name='cellWidth') cw_filename = 'cellWidthVsLatLon.nc' da.to_netcdf(cw_filename) plot_cellWidth=True if plot_cellWidth: import matplotlib from cartopy import config import cartopy.crs as ccrs matplotlib.use('Agg') fig = plt.figure() fig.set_size_inches(16.0, 8.0) plt.clf() ax = plt.axes(projection=ccrs.PlateCarree()) ax.set_global() im = ax.imshow(cellWidth, origin='lower', transform=ccrs.PlateCarree( ), extent=[-180, 180, -90, 90], cmap='jet') ax.coastlines() gl = ax.gridlines( crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color='gray', alpha=0.5, linestyle='-') gl.xlabels_top = False gl.ylabels_right = False plt.title('Grid cell size, km') plt.colorbar(im, shrink=.60) plt.savefig('cellWidthGlobal.png') else: cellWidth, x, y, geom_points, geom_edges = define_base_mesh.cellWidthVsXY() da = xarray.DataArray(cellWidth, dims=['y', 'x'], coords={'y': y, 'x': x}, name='cellWidth') cw_filename = 'cellWidthVsXY.nc' da.to_netcdf(cw_filename) print('Step 2. Generate mesh with JIGSAW') if on_sphere: jigsaw_driver(cellWidth, lon, lat) else: jigsaw_driver( cellWidth, x, y, on_sphere=False, geom_points=geom_points, geom_edges=geom_edges) print('Step 3. Convert triangles from jigsaw format to netcdf') jigsaw_to_netcdf(msh_filename='mesh-MESH.msh', output_name='mesh_triangles.nc', on_sphere=on_sphere) print('Step 4. Convert from triangles to MPAS mesh') write_netcdf(convert(xarray.open_dataset('mesh_triangles.nc')), 'base_mesh.nc') print('Step 5. Inject correct meshDensity variable into base mesh file') inject_meshDensity(cw_filename=cw_filename, mesh_filename='base_mesh.nc', on_sphere=on_sphere) if do_inject_bathymetry: print('Step 6. Injecting bathymetry') inject_bathymetry(mesh_file='base_mesh.nc') if preserve_floodplain: print('Step 7. 
Injecting flag to preserve floodplain') inject_preserve_floodplain(mesh_file='base_mesh.nc', floodplain_elevation=floodplain_elevation) print('Step 8. Create vtk file for visualization') args = ['paraview_vtk_field_extractor.py', '--ignore_time', '-l', '-d', 'maxEdges=0', '-v', 'allOnCells', '-f', 'base_mesh.nc', '-o', 'base_mesh_vtk'] print("running", ' '.join(args)) subprocess.check_call(args, env=os.environ.copy()) print("***********************************************") print("** The global mesh file is base_mesh.nc **") print("***********************************************") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--preserve_floodplain', action='store_true') parser.add_argument('--floodplain_elevation', action='store', type=float, default=20.0) parser.add_argument('--inject_bathymetry', action='store_true') parser.add_argument('--geometry', default='sphere') parser.add_argument('--plot_cellWidth', action='store_true') cl_args = parser.parse_args() build_mesh(cl_args.preserve_floodplain, cl_args.floodplain_elevation, cl_args.inject_bathymetry, cl_args.geometry, cl_args.plot_cellWidth)
/testing_and_setup/compass/ocean/lock_exchange/0.5km/rpe_test/plot.py
import numpy from netCDF4 import Dataset import matplotlib.pyplot as plt import matplotlib matplotlib.use('Agg') fig = plt.gcf() nRow = 6 nCol = 2 iTime = [8, 16] nu = ['0.01', '0.1', '1', '10', '100', '200'] time = ['hour 8', 'hour 16'] fig, axs = plt.subplots(nRow, nCol, figsize=( 5.3 * nCol, 2.0 * nRow), constrained_layout=True) for iRow in range(nRow): ncfile = Dataset('output_' + str(iRow + 1) + '.nc', 'r') var = ncfile.variables['temperature'] xtime = ncfile.variables['xtime'] for iCol in range(nCol): ax = axs[iRow, iCol] dis = ax.imshow(var[iTime[iCol], 0:512:4, :].T, extent=[ 0, 120, 20, 0], aspect=2, cmap='jet', vmin=5, vmax=30) if iRow == nRow - 1: ax.set_xlabel('x, km') if iCol == 0: ax.set_ylabel('depth, m') if iCol == nCol - 1: fig.colorbar(dis, ax=axs[iRow, iCol], aspect=5) ax.set_title(time[iCol] + ", " + r"$\nu_h=$" + nu[iRow]) ncfile.close() plt.savefig('sections_lock_exchange.png', bbox_inches='tight')
/testing_and_setup/compass/ocean/overflow/1km/rpe_test/plot.py
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from netCDF4 import Dataset import numpy fig = plt.gcf() fig.set_size_inches(8.0,10.0) nRow=1 #6 nCol=2 nu=['0.01','0.1','1','10','100','1000'] iTime=[3,6] time=['3 hrs','6 hrs'] for iRow in range(nRow): ncfile = Dataset('output_'+str(iRow+1)+'.nc','r') var = ncfile.variables['temperature'] xtime = ncfile.variables['xtime'] for iCol in range(nCol): plt.subplot(nRow, nCol, iRow*nCol+iCol+1) ax = plt.imshow(var[iTime[iCol],0::4,:].T,extent=[0,200,2000,0],aspect=2) plt.clim([10,20]) plt.jet() if iRow==nRow-1: plt.xlabel('x, km') if iCol==0: plt.ylabel('depth, m') plt.colorbar() #print(xtime[iTime[iCol],11:13]) plt.title('time='+time[iCol]+', nu='+nu[iRow]) ncfile.close() plt.savefig('sections_overflow.png')
/testing_and_setup/compass/ocean/surface_waves/analysis/comparison.py
#!/usr/bin/env python """ Tidal channel comparison betewen MPAS-O and analytical forcing result. Phillip J. Wolfram 04/12/2019 """ import numpy as np import xarray as xr import matplotlib.pyplot as plt # render statically by default plt.switch_backend('agg') # analytical case x = np.linspace(0,24,100) y = np.sin(x*2*np.pi/24) plt.plot(x,y, lw=3, color='black', label='analytical') # data from MPAS-O on boundary ds = xr.open_mfdataset('output.nc') mask = ds.where(ds.yCell.values.min() == ds.yCell) mask.ssh.mean('nCells').plot(marker='o', label='MPAS-O') plt.legend() plt.ylabel('ssh amplitude (m)') plt.xlabel('Time (min)') plt.savefig('tidalcomparison.png')
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Powercoders-International/ft-web-dev
refs/heads/main
{"/05-django/solutions/exercise-3-models/shop/admin.py": ["/05-django/solutions/exercise-3-models/shop/models.py"], "/05-django/solutions/exercise-3-models/shop/views.py": ["/05-django/solutions/exercise-3-models/shop/models.py"], "/05-django/solutions/exercise-4-restframework/shop/views.py": ["/05-django/solutions/exercise-3-models/shop/models.py", "/05-django/solutions/exercise-5-filters/shop/serializers.py"], "/05-django/solutions/exercise-5-filters/shop/filters.py": ["/05-django/solutions/exercise-3-models/shop/models.py"], "/05-django/solutions/exercise-5-filters/shop/serializers.py": ["/05-django/solutions/exercise-3-models/shop/models.py"], "/05-django/solutions/exercise-5-filters/shop/urls.py": ["/05-django/solutions/exercise-4-restframework/shop/views.py"], "/05-django/solutions/exercise-2-static/shop/urls.py": ["/05-django/solutions/exercise-3-models/shop/views.py", "/05-django/solutions/exercise-2-static/shop/views.py"]}
└── └── 05-django └── solutions ├── exercise-2-static │ └── shop │ ├── urls.py │ └── views.py ├── exercise-3-models │ └── shop │ ├── admin.py │ ├── models.py │ └── views.py ├── exercise-4-restframework │ └── shop │ └── views.py └── exercise-5-filters └── shop ├── filters.py ├── serializers.py └── urls.py
/05-django/solutions/exercise-2-static/shop/urls.py
from django.urls import path from shop.views import view_article from shop.views import view_articles urlpatterns = [ path('articles/', view_articles), path('articles/<int:id>/', view_article), ]
/05-django/solutions/exercise-2-static/shop/views.py
from json import loads from django.http import JsonResponse from django.http import HttpResponseNotAllowed def view_articles(request): """ Handles GET and POST requests for a collection of articles. curl --include \ http://localhost:8000/shop/articles/ curl --include \ --request POST \ --header "Content-Type: application/json" \ --data '{"name":"test"}' \ http://localhost:8000/shop/articles/ """ if request.method == 'GET': return JsonResponse({'ids': [id for id in range(10)]}) if request.method == 'POST': data = loads(request.body) data['id'] = 1 return JsonResponse(data) return HttpResponseNotAllowed(['GET', 'POST']) def view_article(request, id): """ Handles GET, PATCH and DELETE requests for a single article. curl --include \ http://localhost:8000/shop/articles/1/ curl --include \ --request PATCH \ --header "Content-Type: application/json" \ --data '{"name":"test"}' \ http://localhost:8000/shop/articles/1/ curl --include \ --request DELETE \ http://localhost:8000/shop/articles/1/ """ if request.method == 'GET': return JsonResponse({'id': id}) if request.method == 'PATCH': data = loads(request.body) data['id'] = id return JsonResponse(data) if request.method == 'DELETE': return JsonResponse({'id': id}) return HttpResponseNotAllowed(['GET', 'PATCH', 'DELETE'])
/05-django/solutions/exercise-3-models/shop/admin.py
from django.contrib.admin import ModelAdmin, register from shop.models import Article @register(Article) class ArticelAdmin(ModelAdmin): pass
/05-django/solutions/exercise-3-models/shop/models.py
from django.db.models import Model from django.db.models import CharField class Article(Model): name = CharField(max_length=50)
/05-django/solutions/exercise-3-models/shop/views.py
from json import loads from django.http import JsonResponse from django.http import HttpResponseNotAllowed from django.http import HttpResponseNotFound from shop.models import Article def view_articles(request): """ Handles GET and POST requests for a collection of articles. curl --include \ http://localhost:8000/shop/articles/ curl --include \ --request POST \ --header "Content-Type: application/json" \ --data '{"name":"test"}' \ http://localhost:8000/shop/articles/ """ if request.method == 'GET': articles = [] for article in Article.objects.all(): articles.append({ 'id': article.id, 'name': article.name }) articles = Article.objects.all() return JsonResponse({'articles': articles}) if request.method == 'POST': data = loads(request.body) name = data.get('name') article = Article.objects.create(name=name) return JsonResponse({ 'id': article.id, 'name': article.name }) return HttpResponseNotAllowed(['GET', 'POST']) def view_article(request, id): """ Handles GET, PATCH and DELETE requests for a single article. curl --include \ http://localhost:8000/shop/articles/1/ curl --include \ --request PATCH \ --header "Content-Type: application/json" \ --data '{"name":"foo"}' \ http://localhost:8000/shop/articles/1/ curl --include \ --request DELETE \ http://localhost:8000/shop/articles/1/ """ article = Article.objects.filter(id=id).first() if not article: return HttpResponseNotFound() if request.method == 'GET': return JsonResponse({ 'id': article.id, 'name': article.name }) if request.method == 'PATCH': data = loads(request.body) name = data.get('name') article.name = name article.save() return JsonResponse({ 'id': article.id, 'name': article.name }) if request.method == 'DELETE': article.delete() return JsonResponse({'id': id}) return HttpResponseNotAllowed(['GET', 'PATCH', 'DELETE'])
/05-django/solutions/exercise-4-restframework/shop/views.py
from shop.models import Article from shop.serializers import ArticleSerializer from rest_framework.viewsets import ModelViewSet class ArticleViewSet(ModelViewSet): queryset = Article.objects.all() serializer_class = ArticleSerializer
/05-django/solutions/exercise-5-filters/shop/filters.py
from django_filters import FilterSet from shop.models import Article class ArticleFilter(FilterSet): class Meta: model = Article fields = ['category']
/05-django/solutions/exercise-5-filters/shop/serializers.py
from shop.models import Article from rest_framework.serializers import HyperlinkedModelSerializer class ArticleSerializer(HyperlinkedModelSerializer): class Meta: model = Article fields = ['id', 'name', 'category'] read_only_fields = ['id']
/05-django/solutions/exercise-5-filters/shop/urls.py
from shop.views import ArticleViewSet from rest_framework.routers import DefaultRouter router = DefaultRouter() router.register('articles', ArticleViewSet) urlpatterns = router.urls
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
krzysztof-dudzic/ProjektPortfolioLab
refs/heads/main
{"/charitydonation/views.py": ["/charitydonation/forms.py", "/charitydonation/models.py"], "/charitydonation/admin.py": ["/charitydonation/models.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/accounts/views.py", "/charitydonation/views.py"]}
└── ├── accounts │ ├── models.py │ └── views.py ├── charitydonation │ ├── admin.py │ ├── forms.py │ ├── migrations │ │ ├── 0002_auto_20210909_1554.py │ │ └── 0003_auto_20210913_1642.py │ ├── models.py │ └── views.py └── donation └── urls.py
/accounts/models.py
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.utils.translation import gettext_lazy as _

# Cleanup: the previous version imported Django's own UserManager and then
# shadowed it with the local class below, duplicated the `models` import,
# and carried ~100 lines of commented-out legacy user-model code; all of
# that dead weight has been removed.


class UserManager(BaseUserManager):
    """Manager for CustomUser, which uses the email address as the login."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular user.

        Both email and password are mandatory; extra_fields may override
        the is_staff / is_superuser / is_active defaults.
        """
        if not email:
            raise ValueError('Users must have an email address')
        if not password:
            raise ValueError("Users must have a password!!! ")
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        extra_fields.setdefault('is_active', True)
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create and save a superuser; is_staff and is_superuser must be True."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        extra_fields.setdefault('is_active', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError(_('Superuser must have is_staff=True.'))
        if extra_fields.get('is_superuser') is not True:
            raise ValueError(_('Superuser must have is_superuser=True.'))
        return self.create_user(email, password, **extra_fields)


class CustomUser(AbstractUser):
    """User model that authenticates with email instead of a username."""
    # The inherited username field is disabled; email is the identifier.
    username = None
    email = models.EmailField(_('email address'), max_length=255, unique=True)

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []  # email and password are required by default

    objects = UserManager()

    def __str__(self):
        return self.email
/accounts/views.py
# Fixed: render/View/generic/views were each imported twice; deduplicated.
from django.contrib.auth import authenticate, login, logout, views
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views import View, generic

# NOTE(review): CreateUserForm appears unused in this module — confirm
# before removing the import.
from .forms import CreateUserForm, LoginForm, CustomUserCreationForm


class LoginView(View):
    """Email/password login backed by LoginForm."""

    def get(self, request):
        return render(request, 'login.html', {'form': LoginForm()})

    def post(self, request, *args, **kwargs):
        form = LoginForm(request.POST)
        if form.is_valid():
            user = authenticate(email=form.cleaned_data['email'],
                                password=form.cleaned_data['password'])
            if user is not None:
                login(request, user)
                return redirect('landing-page')
        # invalid form or bad credentials: redisplay the page with errors
        return render(request, 'login.html', {'form': form})


class RegisterView(View):
    """Account sign-up using CustomUserCreationForm."""

    def get(self, request):
        return render(request, 'register.html',
                      {'form': CustomUserCreationForm()})

    def post(self, request):
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('landing-page')
        return render(request, 'register.html', {'form': form})


class LogoutView(View):
    """Log the current user out and return to the landing page."""

    def get(self, request):
        logout(request)
        return redirect('landing-page')
/charitydonation/admin.py
from django.contrib import admin

from .models import Category, Donation, Institution

# Register every charity-donation model with the default admin site.
for model in (Category, Institution, Donation):
    admin.site.register(model)
/charitydonation/forms.py
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model

from .models import Donation, Category, Institution

# class CreateUserForm(UserCreationForm):
#     class Meta:
#         model = get_user_model()
#         fields = ('email', 'username', 'password1', 'password2')


class AddDonationForm(forms.Form):
    """Donation pickup form.

    Fixed: the original declared an inner ``Meta`` on a plain ``forms.Form``;
    Django silently ignores ``Meta`` there, so the form had no fields and no
    validation at all.  The fields are now declared explicitly and are named
    after the keys that charitydonation.views.AddDonation reads from
    ``cleaned_data`` ('bags', 'organization', 'postcode', ...), which
    presumably match the inputs posted by form.html — TODO confirm against
    the template.
    """

    bags = forms.IntegerField(min_value=1)
    categories = forms.ModelMultipleChoiceField(queryset=Category.objects.all())
    organization = forms.ModelChoiceField(queryset=Institution.objects.all())
    address = forms.CharField()
    phone = forms.CharField(max_length=12)
    city = forms.CharField(max_length=64)
    postcode = forms.CharField()
    data = forms.DateField()
    time = forms.TimeField()
    more_info = forms.CharField(required=False)
/charitydonation/migrations/0002_auto_20210909_1554.py
# Generated by Django 3.1 on 2021-09-09 15:54 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('charitydonation', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='donation', name='pick_up_time', ), migrations.AlterField( model_name='donation', name='pick_up_date', field=models.DateTimeField(verbose_name=datetime.datetime), ), migrations.AlterField( model_name='institution', name='type', field=models.CharField(choices=[('1', 'Fundacja'), ('2', 'Organizacja pozarządowa'), ('3', 'Zbiórka lokalna')], default='1', max_length=2), ), ]
/charitydonation/migrations/0003_auto_20210913_1642.py
# Generated by Django 3.1 on 2021-09-13 16:42 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('charitydonation', '0002_auto_20210909_1554'), ] operations = [ migrations.AddField( model_name='donation', name='pick_up_time', field=models.TimeField(default=datetime.time), ), migrations.AlterField( model_name='donation', name='pick_up_date', field=models.DateField(), ), ]
/charitydonation/models.py
import datetime

from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
# from ProjektPortfolioLab.donation import settings
from django.conf import settings

# Rebind User so foreign keys reference the configured custom user model
# (accounts.CustomUser), not django.contrib.auth.models.User imported above.
User = settings.AUTH_USER_MODEL


class Category(models.Model):
    # Donation/institution category label (e.g. clothing, books).
    name = models.CharField(max_length=64)

    def __str__(self):
        return self.name


# Value/label choices for Institution.type; the short code is what's stored.
INSTITUTIONS = (
    ('1', "Fundacja"),
    ('2', "Organizacja pozarządowa"),
    ('3', "Zbiórka lokalna"),
)


class Institution(models.Model):
    # NOTE(review): field name is a typo for "institution_name"; renaming
    # would require a migration plus template/query updates, so it stays.
    istitution_name = models.CharField(max_length=128)
    description = models.TextField()
    type = models.CharField(max_length=2, choices=INSTITUTIONS, default='1')
    categories = models.ManyToManyField(Category)

    def __str__(self):
        return self.istitution_name


class Donation(models.Model):
    # Number of donated bags.
    quantity = models.IntegerField()
    categories = models.ManyToManyField(Category)
    institution = models.ForeignKey(Institution, on_delete=models.CASCADE)
    address = models.TextField()
    phone_number = models.CharField(max_length=12)
    city = models.CharField(max_length=64)
    zip_code = models.TextField()
    pick_up_date = models.DateField()
    # NOTE(review): default is the datetime.time *class*; Django calls it,
    # yielding midnight (00:00). Probably meant an explicit time — confirm.
    pick_up_time = models.TimeField(default=datetime.time)
    pick_up_comment = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)


# # class CustomUser(AbstractUser):
# #     email = models.EmailField(_('email address'), unique=True)
/charitydonation/views.py
from django.contrib.auth import login, logout, authenticate, views
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView
from django.core.paginator import Paginator
from django.db.models import Avg, Count, Q, Sum
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views import View, generic
from django.views.generic.edit import CreateView

from .forms import AddDonationForm
from .models import Category, Donation, Institution


class LandingPage(View):
    """Public landing page with donation totals and institution lists."""

    def get(self, request):
        donations = Donation.objects.all()
        # Total number of donated bags (None when there are no donations yet).
        count_b = donations.aggregate(Sum('quantity'))['quantity__sum']
        # NOTE(review): distinct(<field>) is PostgreSQL-only — confirm the DB.
        count_institutions = Donation.objects.distinct("institution").count()
        # Fixed: this queryset was commented out while still referenced in the
        # context below, raising NameError on every request.
        all_institution_fund = Institution.objects.filter(type='1')
        all_institution_org = Institution.objects.filter(type='2')
        all_institution_lok = Institution.objects.filter(type='3')
        return render(request, 'index.html',
                      {'count_b': count_b,
                       'count_institutions': count_institutions,
                       'all_institution_fund': all_institution_fund,
                       'all_institution_org': all_institution_org,
                       'all_institution_lok': all_institution_lok})


class AddDonation(LoginRequiredMixin, View):
    """Donation form; anonymous users are redirected to '/'."""

    login_url = '/'

    def get(self, request):
        return render(request, 'form.html',
                      {'categories_all': Category.objects.all(),
                       'institutions_all': Institution.objects.all(),
                       'form': AddDonationForm()})

    def post(self, request):
        form = AddDonationForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            # Fixed: dead code referencing an undefined `category_id` removed.
            donat = Donation.objects.create(
                quantity=data['bags'],
                institution=data['organization'],
                address=data['address'],
                phone_number=data['phone'],
                city=data['city'],
                zip_code=data['postcode'],
                pick_up_date=data['data'],
                pick_up_time=data['time'],
                pick_up_comment=data['more_info'],
                user=request.user,
            )
            # Fixed: a ManyToManyField cannot be passed to create(); it must
            # be assigned after the instance exists. create() already saves,
            # so the redundant donat.save() was dropped.
            donat.categories.set(data['categories'])
            return render(request, 'form-confirmation.html', {'form': form})
        return render(request, 'form.html', {'form': form})


class UserView(LoginRequiredMixin, View):
    """List the logged-in user's own donations."""

    login_url = '/'

    def get(self, request):
        donation_user = Donation.objects.filter(user=request.user)
        return render(request, 'user-view.html',
                      {'donation_user': donation_user})


# NOTE(review): these subclasses shadow the imported auth views of the same
# name. It works, but clearer names would help; kept because donation/urls.py
# imports them under these exact names.
class PasswordChangeView(PasswordChangeView):
    template_name = 'change-password.html'
    success_url = 'done/'


class PasswordChangeDoneView(PasswordChangeDoneView):
    template_name = 'change-password-done.html'


class DonationReady(View):
    """Static confirmation page shown after a successful donation."""

    def get(self, request):
        return render(request, 'form-confirmation.html')
/donation/urls.py
"""donation URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from charitydonation.views import LandingPage, AddDonation, UserView, PasswordChangeView, PasswordChangeDoneView, DonationReady from accounts.views import RegisterView, LoginView, LogoutView urlpatterns = [ path('admin/', admin.site.urls), path('', LandingPage.as_view(), name='landing-page'), path('add_donation/', AddDonation.as_view(), name='add-donation'), path('login/', LoginView.as_view(), name='login'), path('register/', RegisterView.as_view(), name='register'), path('logout/', LogoutView.as_view(), name='logout'), path('user_view/', UserView.as_view(), name='user-view'), path('password_change/', PasswordChangeView.as_view(), name='user-change'), path('password_change/done/', PasswordChangeDoneView.as_view(), name='user-change-done'), path('add_donation/form-confirmation/', DonationReady.as_view(), name='form-ready'), ]
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
DamienPond001/Udemy_API
refs/heads/master
{"/API/Section5/code/app.py": ["/API/Section7/code/security.py", "/API/Section6/code/NoDB/security.py"], "/API/Section6/code/UseDB/app.py": ["/API/Section7/code/security.py", "/API/Section6/code/UseDB/item.py", "/API/Section6/code/NoDB/security.py", "/API/Section9/1_recap_of_code/start/resources/user.py"], "/API/Section6/code/NoDB/security.py": ["/API/Section9/1_recap_of_code/start/resources/user.py"]}
└── ├── API │ ├── Section4 │ │ └── code │ │ └── app.py │ ├── Section5 │ │ └── code │ │ └── app.py │ ├── Section6 │ │ ├── code │ │ │ ├── NoDB │ │ │ │ └── security.py │ │ │ └── UseDB │ │ │ ├── app.py │ │ │ └── item.py │ │ └── test.py │ ├── Section7 │ │ └── code │ │ ├── models │ │ │ └── __init__.py │ │ └── security.py │ ├── Section9 │ │ └── 1_recap_of_code │ │ └── start │ │ └── resources │ │ └── user.py │ ├── app.py │ └── storeapp.py └── Datacamp ├── A_B_testing.py ├── EDA_ECDF.py ├── EDA_boxplot_percentile.py ├── EDA_distributions.py ├── Joining_data.py ├── Reading_Data.py ├── SQL.py ├── Working_with_DB.py ├── append_concatdf.py ├── bokeh.py ├── bokeh_interaction.py ├── bokeh_layouts.py ├── bokeh_linked_plots.py ├── bokeh_numpy_pandas.py ├── bokeh_tooltips.py ├── bootstrapping.py ├── data-types.py ├── data_explore.py ├── dataframe_arithmetic.py ├── datetime_indices.py ├── grouby.py ├── hypothesis_testing_with_one_dataset.py ├── idxmax_idxmin.py ├── indexing.py ├── manipulating_indices.py ├── melting.py ├── merge_ordered.py ├── merging.py ├── multi_indexing.py ├── pandas.py ├── parameter_optimisation.py ├── pivit_tables.py ├── pivoting_tables.py ├── readin_and_cleaning.py ├── resampling.py ├── seaborn.py ├── seaborn_multivariate.py ├── sqlalchemy.py ├── sqlalchemy_grouping_labeling.py ├── sqlalchemy_joins.py ├── sqlalchemy_more_statements.py ├── sqlalchemy_statements.py ├── stack_unstack.py ├── tidy_data.py ├── twitter_example.py └── web_import.py
/API/Section4/code/app.py
'''Section 4: first flask_restful items API (no auth, no DB).

This was created after installing virtualenv, which mimics a fresh Python
install so package updates don't affect other applications. E.g.:
    conda create -n venv python=3.5.0 anaconda
    conda activate venv / conda deactivate
'''
from flask import Flask, request
from flask_restful import Resource, Api

app = Flask(__name__)
api = Api(app)

items = []  # in-memory item store: [{'name': ..., 'price': ...}, ...]


class Item(Resource):
    """A single item addressed by name."""

    def get(self, name):
        # next() returns None when no item matches
        item = next(filter(lambda x: x['name'] == name, items), None)
        return {"item": item}, 200 if item is not None else 404

    def post(self, name):
        # The client must send a JSON body with a Content-Type header set.
        if next(filter(lambda x: x['name'] == name, items), None) is not None:
            return {"message": "an item with name '{}' already exists.".format(name)}, 400
        data = request.get_json()
        item = {'name': name, 'price': data['price']}
        items.append(item)
        return item, 201  # 201 = created


class ItemList(Resource):
    def get(self):
        return {"items": items}


api.add_resource(Item, '/item/<string:name>')  # http://127.0.0.1:5000/item/item_name
api.add_resource(ItemList, '/items')

if __name__ == '__main__':
    # Fixed: guard added so importing this module no longer starts the
    # server (matches Section 6's app.py).
    app.run(port=5000, debug=True)  # debug gives better error messages
/API/Section5/code/app.py
'''Section 5: items API with JWT authentication.

This was created after installing virtualenv, which mimics a fresh Python
install so package updates don't affect other applications. E.g.:
    conda create -n venv python=3.5.0 anaconda
    conda activate venv / conda deactivate
'''
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from flask_jwt import JWT, jwt_required

from security import authenticate, identity

app = Flask(__name__)
app.secret_key = "secret_key"  # should be long and random in production

api = Api(app)

# JWT creates an /auth endpoint: the posted username/password go to
# authenticate(); on later requests the token payload is resolved back to a
# user through identity().
jwt = JWT(app, authenticate, identity)

items = []  # in-memory item store: [{'name': ..., 'price': ...}, ...]


class Item(Resource):
    """A single item addressed by name."""

    # Class-level parser avoids per-method duplication; any payload key
    # other than 'price' is silently discarded.
    parser = reqparse.RequestParser()
    parser.add_argument('price',
                        type=float,
                        required=True,
                        help="This field cannot be left blank")

    @jwt_required()
    def get(self, name):
        item = next(filter(lambda x: x['name'] == name, items), None)
        return {"item": item}, 200 if item is not None else 404

    def post(self, name):
        # The client must send a JSON body with a Content-Type header set.
        if next(filter(lambda x: x['name'] == name, items), None) is not None:
            return {"message": "an item with name '{}' already exists.".format(name)}, 400
        data = Item.parser.parse_args()
        item = {'name': name, 'price': data['price']}
        items.append(item)
        return item, 201  # 201 = created

    def delete(self, name):
        global items
        items = list(filter(lambda x: x['name'] != name, items))
        return {"message": "Item deleted"}

    def put(self, name):
        # Fixed: the self parameter was misspelled 'slef'; also removed a
        # stray debug print of the whole item list.
        data = Item.parser.parse_args()
        item = next(filter(lambda x: x['name'] == name, items), None)
        if item is None:
            item = {"name": name, "price": data['price']}
            items.append(item)
        else:
            # `item` references the entry inside `items`, so this updates
            # the stored copy in place.
            item.update(data)
        return item


class ItemList(Resource):
    def get(self):
        return {"items": items}


api.add_resource(Item, '/item/<string:name>')  # http://127.0.0.1:5000/item/item_name
api.add_resource(ItemList, '/items')

if __name__ == '__main__':
    # Fixed: guard added so importing this module no longer starts the server.
    app.run(port=5000, debug=True)  # debug gives better error messages
/API/Section6/code/NoDB/security.py
import hmac

from user import User

# In-memory stand-in for a user database.
users = [
    User(1, "bob", "asdf"),
    User(2, "Damien", "bitches")
]

# Lookup tables so authenticate/identity avoid scanning the list each call.
username_mapping = {u.username: u for u in users}
userid_mapping = {u.id: u for u in users}


def authenticate(username, password):
    """Return the matching User when the password is correct, else None.

    Fixed: werkzeug.security.safe_str_cmp was removed in Werkzeug 2.1; the
    stdlib hmac.compare_digest gives the same constant-time comparison.
    """
    user = username_mapping.get(username)  # .get -> None when absent
    if user is not None and hmac.compare_digest(
            user.password.encode(), password.encode()):
        return user


def identity(payload):
    """Flask-JWT identity callback: map the token payload to a User.

    `payload` is the decoded JWT contents; 'identity' holds the user id.
    """
    return userid_mapping.get(payload['identity'])
/API/Section6/code/UseDB/app.py
'''This was created after installing virtualenv. This allows use to create a virtual environment
that mimics a fresh Python install. This ensures that any updates to packages don't affect previous applications
built on previous package versions.

Run:
conda create -n venv python=3.5.0 anaconda
to create a virtual env called venv with python 3.5.0

conda activate venv
conda deactivate'''

from flask import Flask
from flask_restful import Api
from flask_jwt import JWT

from security import authenticate, identity
from user import UserRegister
from item import Item, ItemList

app = Flask(__name__)
# Signing key for session/JWT data; must be set before JWT() is constructed.
app.secret_key = "secret_key"  # this should be long and complicated in a production sense
api = Api(app)

jwt = JWT(app, authenticate, identity)
'''
JWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to
authenticate and identity
If authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT
The JWT calls the identity function which gets the correct id and returns the user
'''

# Route registrations: single item, full listing, and user sign-up.
api.add_resource(Item, '/item/<string:name>')  # http://127.0.0.1:5000/item/item_name
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')

if __name__ == '__main__':  # This ensures that this is not run if app.py is imported, but only when called
    app.run(port=5000, debug=True)  # debug gives better error messages
/API/Section6/code/UseDB/item.py
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
import sqlite3

DB_PATH = 'data.db'  # SQLite file shared by all resource methods


class Item(Resource):
    """REST resource for a single named item backed by SQLite."""

    # Shared parser: only 'price' may come from the JSON payload.
    parser = reqparse.RequestParser()
    parser.add_argument('price',
                        type=float,
                        required=True,
                        help="This field cannot be left blank")

    @jwt_required()
    def get(self, name):
        """Return the item, or 404 when it does not exist."""
        item = self.find_by_name(name)
        if item is not None:
            return item, 200
        return {"message": "Item not found"}, 404

    @classmethod
    def find_by_name(cls, name):
        """Fetch one item row by name; None when absent.

        Used internally instead of get() because get() requires a JWT.
        """
        connection = sqlite3.connect(DB_PATH)
        try:  # fixed: connection leaked if the query raised
            cursor = connection.cursor()
            result = cursor.execute("SELECT * FROM items WHERE name = ?",
                                    (name,))
            row = result.fetchone()
        finally:
            connection.close()
        if row is not None:
            return {'item': {'name': row[0], 'price': row[1]}}

    def post(self, name):
        """Create the item; 400 if it exists, 500 on a database error."""
        if self.find_by_name(name) is not None:
            return {"message": "item already in database"}, 400
        data = Item.parser.parse_args()
        item = {'name': name, 'price': data['price']}
        try:
            self.insert_item(item)
        except sqlite3.Error:  # fixed: was a bare `except:`
            return {"message": "An error occurred"}, 500
        return item, 201  # 201 = created

    @classmethod
    def insert_item(cls, item):
        """Insert a {'name', 'price'} mapping as a new row."""
        connection = sqlite3.connect(DB_PATH)
        try:
            connection.execute("INSERT INTO items VALUES (?, ?)",
                               (item['name'], item['price']))
            connection.commit()
        finally:
            connection.close()

    def delete(self, name):
        """Remove the item (idempotent: no error when it is absent)."""
        connection = sqlite3.connect(DB_PATH)
        try:
            connection.execute("DELETE FROM items WHERE name = ?", (name,))
            connection.commit()
        finally:
            connection.close()
        return {"message": "Item deleted"}

    def put(self, name):
        """Insert or update the item at the posted price."""
        existing = self.find_by_name(name)
        data = Item.parser.parse_args()
        updated_item = {'name': name, 'price': data['price']}
        try:
            if existing is None:
                self.insert_item(updated_item)
            else:
                self.update(updated_item)
        except sqlite3.Error:
            # Fixed: the original built this error tuple but never returned
            # it, so database failures fell through and reported success.
            return {"message": "an error occurred"}, 500
        return updated_item, 201

    @classmethod
    def update(cls, item):
        """Set the stored price for an existing item row."""
        connection = sqlite3.connect(DB_PATH)
        try:
            connection.execute("UPDATE items SET price = ? WHERE name = ?",
                               (item['price'], item['name']))
            connection.commit()
        finally:
            connection.close()


class ItemList(Resource):
    """REST resource listing every stored item."""

    def get(self):
        connection = sqlite3.connect(DB_PATH)
        try:
            items = connection.execute("SELECT * FROM items").fetchall()
        finally:
            connection.close()
        # fetchall() returns [] (never None) when the table is empty, so the
        # original "No items in database" branch was unreachable.
        return {'items': items}
/API/Section6/test.py
"""Scratch script exploring the sqlite3 API: create, insert, select."""
import sqlite3

connection = sqlite3.connect('data.db')
# The cursor executes queries and iterates over their results.
cursor = connection.cursor()

# Fixed: a plain CREATE TABLE crashes with "table users already exists" on
# the second run; IF NOT EXISTS makes the script re-runnable.
create_table = "CREATE TABLE IF NOT EXISTS users (id int, username text, password text)"
cursor.execute(create_table)

# NOTE(review): these inserts repeat on every run, so duplicate rows
# accumulate — acceptable for a scratch script, not for real code.
insert_query = "INSERT INTO users VALUES (?, ?, ?)"
user = (1, "damien", "bitches")
cursor.execute(insert_query, user)

users = [
    (2, "not damien", "notbitches"),
    (3, "other", "otherps")
]
cursor.executemany(insert_query, users)

# The SELECT result is consumed lazily through the cursor.
select_query = "SELECT * from users"
a = cursor.execute(select_query)

# List every table name in the database.
res = connection.execute("SELECT name FROM sqlite_master WHERE type='table';")
for name in res:
    print(name[0])

print(next(a))  # first row of the users SELECT

connection.commit()
connection.close()
/API/Section7/code/models/__init__.py
# -*- coding: utf-8 -*- """ Created on Tue Aug 7 11:20:39 2018 @author: Damien """
/API/Section7/code/security.py
import hmac

from models.user import UserModel


def authenticate(username, password):
    """Return the UserModel whose password matches, else None.

    Fixed: werkzeug.security.safe_str_cmp was removed in Werkzeug 2.1; the
    stdlib hmac.compare_digest gives the same constant-time comparison.
    """
    user = UserModel.find_by_username(username)
    if user is not None and hmac.compare_digest(
            user.password.encode(), password.encode()):
        return user


def identity(payload):
    """Flask-JWT identity callback: resolve the token payload to a user.

    `payload` is the decoded JWT contents; 'identity' holds the user id.
    """
    return UserModel.find_by_id(payload['identity'])
/API/Section9/1_recap_of_code/start/resources/user.py
import hmac

from flask_restful import Resource, reqparse
from flask_jwt_extended import (
    create_access_token,
    create_refresh_token,
    jwt_refresh_token_required,
    get_jwt_identity
)

from models.user import UserModel


def _credential_parser():
    """Build a parser requiring non-blank 'username' and 'password' strings.

    Deduplicates the identical parser setup UserRegister and UserLogin had.
    """
    parser = reqparse.RequestParser()
    for field in ('username', 'password'):
        parser.add_argument(field,
                            type=str,
                            required=True,
                            help="This field cannot be blank.")
    return parser


class UserRegister(Resource):
    """POST: create a new user account."""

    parser = _credential_parser()

    def post(self):
        data = UserRegister.parser.parse_args()

        if UserModel.find_by_username(data['username']):
            return {"message": "A user with that username already exists"}, 400

        user = UserModel(**data)
        user.save_to_db()
        return {"message": "User created successfully."}, 201


class User(Resource):
    """Lookup / removal of users by id."""

    @classmethod
    def get(cls, user_id):
        user = UserModel.find_by_id(user_id)
        if user is None:
            return {'message': 'user not found'}, 404
        return user.json()

    @classmethod
    def delete(cls, user_id):
        user = UserModel.find_by_id(user_id)
        if user is None:
            return {'message': 'User not found'}, 404
        user.delete_from_db()
        return {'message': 'User deleted'}


class UserLogin(Resource):
    """POST: verify credentials and issue access/refresh tokens."""

    parser = _credential_parser()

    def post(self):
        data = self.parser.parse_args()

        # This is what 'authenticate()' used to do under plain Flask-JWT.
        user = UserModel.find_by_username(data['username'])

        # Fixed: werkzeug.security.safe_str_cmp was removed in Werkzeug 2.1;
        # hmac.compare_digest is the stdlib constant-time replacement.
        if user is not None and hmac.compare_digest(
                user.password.encode(), data['password'].encode()):
            # What the 'identity()' function used to do.
            access_token = create_access_token(identity=user.id, fresh=True)
            refresh_token = create_refresh_token(user.id)
            return {
                'access_token': access_token,
                'refresh_token': refresh_token
            }, 200
        return {'message': 'Invalid credentials'}, 401


class TokenRefresh(Resource):
    """POST: trade a valid refresh token for a non-fresh access token."""

    # NOTE(review): jwt_refresh_token_required is the flask-jwt-extended 3.x
    # API; 4.x replaced it with @jwt_required(refresh=True) — confirm the
    # installed version before upgrading.
    @jwt_refresh_token_required
    def post(self):
        current_user = get_jwt_identity()
        new_token = create_access_token(identity=current_user, fresh=False)
        return {'access_token': new_token}, 200
/API/app.py
# -*- coding: utf-8 -*-
"""Minimal Flask hello-world app.

Created on Tue Jul 24 15:04:52 2018

@author: Damien
"""
from flask import Flask

app = Flask(__name__)  # unique __name__ - special python variable


# '/' represents the home page, e.g. http://www.google.com/
# (http://www.google.com/maps would be a '/maps' endpoint)
@app.route('/')
def home():
    """Respond to GET / with a greeting (must return a response)."""
    return "Hello, world!"


if __name__ == '__main__':
    # Fixed: guard added so importing this module no longer starts the server.
    # Run with `python app.py`, then browse to 127.0.0.1:5000
    # (127.0.0.1 is the IP reserved for your computer).
    app.run(port=5000)
/API/storeapp.py
# -*- coding: utf-8 -*-
"""In-memory store/items JSON API.

Created on Tue Jul 24 16:43:47 2018

@author: Damien

NOTE on JSON: JSON is essentially a dictionary in string format, so Python
dicts must be converted to text (jsonify) before being returned.
"""
from flask import Flask, jsonify, request, render_template

app = Flask(__name__)  # unique __name__ - special python variable

# In-memory data: a list of stores, each holding a list of items.
stores = [
    {
        'name': 'My Store',
        'items': [
            {
                'name': 'My Item',
                'price': 15.99
            }
        ]
    }
]


def _find_store(name):
    """Return the store dict with the given name, or None."""
    return next((s for s in stores if s['name'] == name), None)


@app.route('/')
def home():
    return render_template('index.html')  # looked up in the templates folder


# POST - receives data; GET - sends data back

# POST /store  data: {name:}
@app.route('/store', methods=['POST'])  # routes default to GET only
def create_store():
    request_data = request.get_json()
    new_store = {
        'name': request_data['name'],
        'items': []
    }
    stores.append(new_store)
    return jsonify(new_store)


# GET /store/<string:name>
@app.route('/store/<string:name>')  # <string:name> is a flask converter
def get_store(name):
    # Fixed: the original returned 'No such store' from inside the loop on
    # the first non-matching entry, so only the first store was reachable.
    store = _find_store(name)
    if store is not None:
        return jsonify(store)
    return jsonify({'message': 'No such store'})


# GET /store
@app.route('/store')
def get_stores():
    return jsonify({'stores': stores})  # wrap the list in a dictionary


# POST /store/<string:name>/item  data: {name:, price:}
@app.route('/store/<string:name>/item', methods=['POST'])
def create_item(name):
    request_data = request.get_json()
    store = _find_store(name)  # fixed: same first-store-only bug as get_store
    if store is not None:
        new_item = {
            'name': request_data['name'],
            'price': request_data['price']
        }
        store['items'].append(new_item)
        return jsonify(new_item)
    return jsonify({"message": " No such store"})


# GET /store/<string:name>/item
@app.route('/store/<string:name>/item')
def get_item_in_store(name):
    store = _find_store(name)  # fixed: same first-store-only bug as get_store
    if store is not None:
        return jsonify({'items': store['items']})
    return jsonify({'message': 'No such store'})


if __name__ == '__main__':
    # Fixed: guard added so importing this module no longer starts the server.
    # Run with `python storeapp.py`, then browse to 127.0.0.1:5000.
    app.run(port=5000)
/Datacamp/A_B_testing.py
# Construct arrays of data: dems, reps dems = np.array([True] * 153 + [False] * 91) reps = np.array([True] * 136 + [False] * 35) def frac_yea_dems(dems, reps): """Compute fraction of Democrat yea votes.""" frac = np.sum(dems) / len(dems) return frac # Acquire permutation samples: perm_replicates perm_replicates = draw_perm_reps(dems, reps, frac_yea_dems, 10000) # Compute and print p-value: p p = np.sum(perm_replicates <= 153/244) / len(perm_replicates) print('p-value =', p) # Compute the difference in mean sperm count: diff_means diff_means = diff_of_means(control, treated) # Compute mean of pooled data: mean_count mean_count = np.mean(np.concatenate([control, treated])) # Generate shifted data sets control_shifted = control - np.mean(control) + mean_count treated_shifted = treated - np.mean(treated) + mean_count # Generate bootstrap replicates bs_reps_control = draw_bs_reps(control_shifted, np.mean, size=10000) bs_reps_treated = draw_bs_reps(treated_shifted, np.mean, size=10000) # Get replicates of difference of means: bs_replicates bs_replicates = bs_reps_control- bs_reps_treated # Compute and print p-value: p p = np.sum(bs_replicates >= np.mean(control) - np.mean(treated)) \ / len(bs_replicates) print('p-value =', p)
/Datacamp/EDA_ECDF.py
"""Datacamp exercise: empirical cumulative distribution functions.

Fixed: the script used np/plt and course-supplied data at import time with
no imports at all, so it crashed immediately; ecdf() is now importable and
the plotting demo only runs as a script.
"""
import numpy as np


def ecdf(data):
    """Compute the ECDF for a one-dimensional array of measurements.

    Returns:
        (x, y): sorted data values and cumulative fractions in (0, 1].
    """
    n = len(data)
    x = np.sort(data)
    # the i-th sorted value has (i+1)/n of the data at or below it
    y = np.arange(1, n + 1) / n
    return x, y


if __name__ == '__main__':
    # Course demo: requires matplotlib and the iris measurements
    # (versicolor_petal_length) provided by the Datacamp environment.
    import matplotlib.pyplot as plt

    # Compute ECDF for versicolor data: x_vers, y_vers
    x_vers, y_vers = ecdf(versicolor_petal_length)  # noqa: F821

    _ = plt.plot(x_vers, y_vers, marker='.', linestyle='none')
    plt.xlabel('versicolor_petal_length')
    plt.ylabel('ECDF')
    plt.show()
/Datacamp/EDA_boxplot_percentile.py
# Datacamp notes: summary statistics, correlation, percentiles and boxplots.
# NOTE(review): `data`, `versicolor_petal_length`, `versicolor_petal_width`,
# `sns`, `plt` and `df` come from the course environment.
import numpy as np

# Bare expressions below are interactive-session probes, not assignments.
np.mean(data)
np.median(data)
np.var(versicolor_petal_length)
np.std(versicolor_petal_length)

#covariance matrix:
# returns a 2D array where entries [0,1] and [1,0] are the covariances.
# Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of the data in y
np.cov(versicolor_petal_length, versicolor_petal_width)

def pearson_r(x, y):
    """Compute Pearson correlation coefficient between two arrays."""
    # Compute correlation matrix: corr_mat
    corr_mat = np.corrcoef(x, y)
    # Return entry [0,1] (the off-diagonal correlation of x with y)
    return corr_mat[0, 1]

# Compute Pearson correlation coefficient for I. versicolor: r
r = pearson_r(versicolor_petal_length, versicolor_petal_width)

# Print the result
print(r)

# Specify array of percentiles: percentiles
percentiles = np.array([2.5, 25, 50, 75, 97.5])

# Compute percentiles: ptiles_vers
ptiles_vers = np.percentile(versicolor_petal_length, percentiles)

# Print the result
print(ptiles_vers)

# Create box plot with Seaborn's default settings
_ = sns.boxplot(x='species', y='petal length (cm)', data=df)

# Label the axes
plt.xlabel('species')
plt.ylabel('petal length (cm)')

# Show the plot
plt.show()
/Datacamp/EDA_distributions.py
# Datacamp notes: sampling from standard distributions with numpy.
#
# Reference signatures, kept as comments: the argument names are placeholders
# and `size=` with no value is not valid Python, so as code these lines were
# syntax errors.  (The exponential line also had the typo `np.randm`.)
#   np.random.binomial(trials, probability_of_success, size=number_of_reps)
#   np.random.poisson(average_rate, size=number_of_reps)
#   np.random.normal(mean, std, size=number_of_reps)
#   np.random.exponential(mean, size=number_of_reps)

# Draw 100000 samples from Normal distribution with stds of interest:
# samples_std1, samples_std3, samples_std10
samples_std1 = np.random.normal(20, 1, size=100000)
samples_std3 = np.random.normal(20, 3, size=100000)
samples_std10 = np.random.normal(20, 10, size=100000)

# Make histograms
# NOTE(review): `normed=True` was removed in matplotlib 3.x -- use
# `density=True` on a modern matplotlib.
_ = plt.hist(samples_std1, normed=True, histtype='step', bins=100)
_ = plt.hist(samples_std3, normed=True, histtype='step', bins=100)
_ = plt.hist(samples_std10, normed=True, histtype='step', bins=100)

# Make a legend, set limits and show plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))
plt.ylim(-0.01, 0.42)
plt.show()

# Compute mean and standard deviation: mu, sigma
mu = np.mean(belmont_no_outliers)
sigma = np.std(belmont_no_outliers)

# Sample out of a normal distribution with this mu and sigma: samples
samples = np.random.normal(mu, sigma, size=10000)

# Get the CDF of the samples and of the data
x_theor, y_theor = ecdf(samples)
x, y = ecdf(belmont_no_outliers)

# Plot the CDFs and show the plot
_ = plt.plot(x_theor, y_theor)
_ = plt.plot(x, y, marker='.', linestyle='none')
_ = plt.xlabel('Belmont winning time (sec.)')
_ = plt.ylabel('CDF')
plt.show()


def successive_poisson(tau1, tau2, size=1):
    """Compute time for arrival of 2 successive Poisson processes.

    Parameters
    ----------
    tau1, tau2 : float
        Mean waiting times of the two exponential (inter-arrival)
        distributions.
    size : int, optional
        Number of samples to draw (default 1).

    Returns
    -------
    ndarray
        Element-wise sum of one draw from each exponential distribution.
    """
    # Draw samples out of first exponential distribution: t1
    t1 = np.random.exponential(tau1, size=size)
    # Draw samples out of second exponential distribution: t2
    t2 = np.random.exponential(tau2, size=size)
    return t1 + t2
/Datacamp/Joining_data.py
# --- Row concatenation --------------------------------------------------
# Stacks the DataFrames vertically; the original row indices are kept.
# Pass ignore_index=True to renumber rows sequentially instead, or axis=1
# to concatenate column-wise.
row_concat = pd.concat([uber1, uber2, uber3])  # each element is a DataFrame

# --- Concatenating many files at once -----------------------------------
# Import necessary modules
import glob
import pandas as pd

# Glob patterns: * matches any run of characters, ? matches exactly one.
pattern = '*.csv'

# List of file names matching the pattern
csv_files = glob.glob(pattern)

# Read every matching CSV into its own DataFrame ...
frames = [pd.read_csv(csv) for csv in csv_files]

# ... and stack them into a single DataFrame.
uber = pd.concat(frames)
/Datacamp/Reading_Data.py
# Datacamp notes: reading data from flat files and other formats.

#Basics of reading in:
filename = 'file.txt'
file = open(filename, mode = 'r') #'r' is to read, 'w' is to write
text = file.read()
file.close()

with open('huck_finn.txt', 'r') as file: #with is referred to as the context manager
    print(file.read())

#Using NumPy - for numeric arrays
#This allows use of sci-kit learn
import numpy as np
#Can use:
data = np.loadtxt(filename, delimiter = "'", skiprows = 1, usecols=[0, 2], dtype=str)

#Alternatively, use Pandas (this is preferable)
import pandas as pd
data = pd.read_csv(filename, sep = '\t', comment='#', na_values='Nothing')
#comment drops everything after '#', na_values are user specified nulls
#header=0 and names=new_names will label the rows
#parse_dates parses date-like columns into datetime64 -- TODO confirm column spec
#index_col specifies which col should be the index
data.head() #prints first 5 rows; .head(10) displays 10 rows
data_array = data.values #converts to numpy array

#Other types of import files:
#Pickled file: files containing python data structures that don't translate to an obvious readable form (i.e. dicts, lists, tuples)
# Import pickle package
import pickle
# Open pickle file and load data: d
with open('data.pkl', 'rb') as file:
    d = pickle.load(file)

#Excel
file = "excel.xlsx"
data = pd.ExcelFile(file)
print(data.sheet_names)
df1 = data.parse('name_of_sheet')
df2 = data.parse(1) #index of sheet
df1 = data.parse(0, skiprows=[1], names=['Country', 'AAM due to War (2002)'])

#SAS
# Import sas7bdat package
from sas7bdat import SAS7BDAT
# Save file to a DataFrame: df_sas
with SAS7BDAT('sales.sas7bdat') as file:
    df_sas = file.to_data_frame()

#Stata
# Import pandas
import pandas as pd
# Load Stata file into a pandas DataFrame: df
df = pd.read_stata('disarea.dta')

#HDF5 (Hierarchical Data Format version 5)
import h5py
# Assign filename: file
file = 'LIGO_data.hdf5'
# Load file: data
data = h5py.File(file, 'r')
# Print the datatype of the loaded file
print(type(data))
# Print the keys of the file. HDF5 files have a hierarchical structure that can be drilled down using the keys
for key in data.keys():
    print(key)
group = data['strain']
# Check out keys of group
for key in group.keys():
    print(key)
# Set variable equal to time series data: strain
# NOTE(review): `.value` was removed in h5py 3.x -- use ds[()] on modern h5py.
strain = data['strain']['Strain'].value

#MATLAB
# Import package
import scipy.io
# Load MATLAB file: mat
mat = scipy.io.loadmat('albeck_gene_expression.mat')
#loads a dict with the variables : values of things that were saved in the MATLAB workspace
/Datacamp/SQL.py
# Datacamp SQL notes -- pseudo-SQL reference, not executable Python
# despite living in a .py file.

SELECT * FROM table
SELECT COUNT(*) FROM table #counts number of rows
SELECT DISTINCT row FROM table #selects unique entries in row
SELECT COUNT(row) FROM table #counts non-null entries
SELECT COUNT(DISTINCT row) FROM table #returns count of distinct entries

SELECT * FROM table WHERE column_value = 'some_value'
#Use boolean operators, note that <> is !=
SELECT * FROM table WHERE column1 = 'some_value' AND/OR column2 > some_value;
SELECT * FROM table WHERE column BETWEEN value1 AND value2; #Returns a range (inclusive)
SELECT * FROM table WHERE column IN ('...', '....', '....') #use this instead of multiple ORs
#NOTE(review): the backslash below separates the two alternatives in these
#notes; it is not SQL syntax.
SELECT * FROM table WHERE column IS NULL\IS NOT NULL #filter column on null\not null values
SELECT * FROM table WHERE column LIKE 'Data%' # % wildcard matches none, one or many
SELECT * FROM table WHERE column NOT LIKE 'Data%' # % wildcard matches none, one or many. Here we return all entries that DON'T match
SELECT * FROM table WHERE column LIKE 'Data_' # _ wildcard matches a single char

###AGGREGATION####
SELECT SUM(column) FROM table #AVG, MIN, MAX
SELECT (col1 + col2)*3 AS new_col FROM table
#Note: (3/2) = 1, (3.0/2.0) = 1.5  -- integer vs. floating-point division
#Can combine aggregations with arithmetic

####ORDERING####
SELECT column FROM table ORDER BY col1 DESC #NOTE comes after WHERE clauses

###GROUPING###
SELECT col1, COUNT(col2) FROM table GROUP BY col1
#NOTE can't SELECT a column that isn't the GROUP BY, unless we aggregate it

###HAVING###
#HAVING filters on aggregates (WHERE cannot).
SELECT column FROM table HAVING AVG(col1) > ...

###FULL EG###
SELECT release_year, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross
FROM films
WHERE release_year > 1990
GROUP BY release_year
HAVING AVG(budget) > 60000000
ORDER BY avg_gross DESC

SELECT country, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross
FROM films
GROUP BY country
HAVING COUNT(title) > 10
ORDER BY country
LIMIT 5
/Datacamp/Working_with_DB.py
# Import necessary module from sqlalchemy import create_engine import pandas as pd # Create engine: engine engine = create_engine('sqlite:///Chinook.sqlite') # Save the table names to a list: table_names table_names = engine.table_names() # Print the table names to the shell print(table_names) #Executing a query con = engine.connect() # Perform query: rs rs = con.execute("SELECT * from Album") # Save results of the query to DataFrame: df df = pd.DataFrame(rs.fetchall()) df.columns = rs.keys() # Close connection con.close() #auto close connection with engine.connect() as con: rs = con.execute("SELECT LastName, Title FROM Employee") df = pd.DataFrame(rs.fetchmany(3)) df.columns = rs.keys() #ALTERNATIVELY # Import packages from sqlalchemy import create_engine import pandas as pd # Create engine: engine engine = create_engine('sqlite:///Chinook.sqlite') # Execute query and store records in DataFrame: df df = pd.read_sql_query('SELECT * FROM Album', engine)
/Datacamp/append_concatdf.py
# Datacamp notes: append/concat with pandas (transcript -- the blocks of
# pasted console output below are NOT valid Python; this file documents a
# session rather than being importable).

# Append names_1981 after names_1881 with ignore_index=True: combined_names
combined_names = names_1881.append(names_1981, ignore_index=True)
#ignore_index resets the index, else the indices from the original dfs are placed on top of one another

# Concatenate weather_max and weather_mean horizontally: weather
weather = pd.concat([weather_max, weather_mean], axis=1)
#axis=1 means concat horizontally (this does something similar to a full outer join)

# Pasted output:
     Max TemperatureF  Mean TemperatureF
Apr              89.0          53.100000
Aug               NaN          70.000000
Dec               NaN          34.935484
Feb               NaN          28.714286
Jan              68.0          32.354839
Jul              91.0          72.870968
Jun               NaN          70.133333
Mar               NaN          35.000000
May               NaN          62.612903
Nov               NaN          39.800000
Oct              84.0          55.451613
Sep               NaN          63.766667

# NOTE(review): `medals` and `medal_types` are presumably initialized
# earlier in the course script (medals = []) -- not shown here.
for medal in medal_types:
    # Create the file name: file_name
    file_name = "%s_top5.csv" % medal
    # Create list of column names: columns
    columns = ['Country', medal]
    # Read file_name into a DataFrame: df
    medal_df = pd.read_csv(file_name, header=0, index_col='Country', names=columns) #names sets the column names
    # Append medal_df to medals
    medals.append(medal_df)

# Concatenate medals horizontally: medals
medals = pd.concat(medals, axis='columns') #same as axis=1

# Print medals
print(medals)

#using multi level indexes:
for medal in medal_types:
    file_name = "%s_top5.csv" % medal
    # Read file_name into a DataFrame: medal_df
    medal_df = pd.read_csv(file_name, index_col='Country')
    # Append medal_df to medals
    medals.append(medal_df)

# Concatenate medals: medals
medals = pd.concat(medals, axis='rows', keys=['bronze', 'silver', 'gold'])

# Print medals in entirety
print(medals)

# Pasted output:
                        Total
       Country
bronze United States   1052.0
       Soviet Union     584.0
       United Kingdom   505.0
       France           475.0
       Germany          454.0
silver United States   1195.0
       Soviet Union     627.0
       United Kingdom   591.0
       France           461.0
       Italy            394.0
gold   United States   2088.0
       Soviet Union     838.0
       United Kingdom   498.0
       Italy            460.0
       Germany          407.0

# Sort the entries of medals: medals_sorted
medals_sorted = medals.sort_index(level=0)

# Print the number of Bronze medals won by Germany
print(medals_sorted.loc[('bronze','Germany')])

# Print data about silver medals
print(medals_sorted.loc['silver'])

# Create alias for pd.IndexSlice: idx
#A slicer pd.IndexSlice is required when slicing on the inner level of a MultiIndex
idx = pd.IndexSlice

# Print all the data on medals won by the United Kingdom
print(medals_sorted.loc[idx[:,'United Kingdom'], :])

# Make the list of tuples: month_list
month_list = [('january', jan), ('february', feb), ('march', mar)]

# Create an empty dictionary: month_dict
month_dict = {}

for month_name, month_data in month_list:
    # Group month_data: month_dict[month_name]
    month_dict[month_name] = month_data.groupby('Company').sum()

# Concatenate data in month_dict: sales
# (dict keys become the outer level of the resulting MultiIndex)
sales = pd.concat(month_dict)

# Print sales
print(sales)

# Pasted output:
                          Units
         Company
february Acme Coporation     34
         Hooli               30
         Initech             30
         Mediacore           45
         Streeplex           37
january  Acme Coporation     76
         Hooli               70
         Initech             37
         Mediacore           15
         Streeplex           50
march    Acme Coporation      5
         Hooli               37
         Initech             68
         Mediacore           68
         Streeplex           40

# Print all sales by Mediacore
idx = pd.IndexSlice
print(sales.loc[idx[:, 'Mediacore'], :])
/Datacamp/bokeh.py
# Import figure from bokeh.plotting from bokeh.plotting import figure # Import output_file and show from bokeh.io from bokeh.io import output_file, show # Create the figure: p p = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)') # Add a circle glyph to the figure p p.circle(fertility,female_literacy) # Call the output_file() function and specify the name of the file output_file('fert_lit.html') # Display the plot show(p) # Create the figure: p p = figure(x_axis_label='fertility', y_axis_label='female_literacy (% population)') # Add a circle glyph to the figure p p.circle(fertility_latinamerica, female_literacy_latinamerica, size=10, alpha=0.8, color='blue') # Add an x glyph to the figure p p.x(fertility_africa, female_literacy_africa) # Specify the name of the file output_file('fert_lit_separate.html') # Display the plot show(p) #lines # Import figure from bokeh.plotting from bokeh.plotting import figure # Create a figure with x_axis_type="datetime": p p = figure(x_axis_type='datetime', x_axis_label='Date', y_axis_label='US Dollars') # Plot date along the x axis and price along the y axis p.line(date, price) p.circle(date, price, fill_color='white', size=4) # Specify the name of the output file and show the result output_file('line.html') show(p) #patches # Create a list of az_lons, co_lons, nm_lons and ut_lons: x x = [az_lons, co_lons, nm_lons, ut_lons] # Create a list of az_lats, co_lats, nm_lats and ut_lats: y y = [az_lats, co_lats, nm_lats, ut_lats] # Add patches to figure p with line_color=white for x and y p.patches(x,y, line_color='white') # Specify the name of the output file and show the result output_file('four_corners.html') show(p)
/Datacamp/bokeh_interaction.py
# Create a figure with the "box_select" tool: p p = figure(x_axis_label='Year', y_axis_label='Time', tools='box_select') # Add circle glyphs to the figure p with the selected and non-selected properties p.circle('Year', 'Time', source=source,selection_color='red', nonselection_alpha=0.1) # Specify the name of the output file and show the result output_file('selection_glyph.html') show(p) # import the HoverTool from bokeh.models import HoverTool # Add circle glyphs to figure p p.circle(x, y, size=10, fill_color='grey', alpha=0.1, line_color=None, hover_fill_color='firebrick', hover_alpha=0.5, hover_line_color='white') # Create a HoverTool: hover hover = HoverTool(tooltips=None, mode='vline') # Add the hover tool to the figure p p.add_tools(hover) # Specify the name of the output file and show the result output_file('hover_glyph.html') show(p) #Import CategoricalColorMapper from bokeh.models from bokeh.models import CategoricalColorMapper # Convert df to a ColumnDataSource: source source = ColumnDataSource(df) # Make a CategoricalColorMapper object: color_mapper color_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'], palette=['red', 'green', 'blue']) # Add a circle glyph to the figure p p.circle('weight', 'mpg', source=source, color=dict(field='origin', transform=color_mapper), legend='origin') # Specify the name of the output file and show the result output_file('colormap.html') show(p)
/Datacamp/bokeh_layouts.py
# Import row from bokeh.layouts from bokeh.layouts import row, column # Create the first figure: p1 p1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)') # Add a circle glyph to p1 p1.circle('fertility', 'female_literacy', source=source) # Create the second figure: p2 p2 = figure(x_axis_label='population', y_axis_label='female_literacy (% population)') # Add a circle glyph to p2 p2.circle('population', 'female_literacy', source=source) # Put p1 and p2 into a horizontal row: layout layout = row(p1,p2) #layout = column(p1, p2) # Specify the name of the output_file and show the result output_file('fert_row.html') show(layout) # Import column and row from bokeh.layouts from bokeh.layouts import row, column # Make a column layout that will be used as the second row: row2 row2 = column([mpg_hp, mpg_weight], sizing_mode='scale_width') # Make a row layout that includes the above column layout: layout layout = row([avg_mpg, row2], sizing_mode='scale_width') # Specify the name of the output_file and show the result output_file('layout_custom.html') show(layout) # Import gridplot from bokeh.layouts from bokeh.layouts import gridplot # Create a list containing plots p1 and p2: row1 row1 = [p1, p2] # Create a list containing plots p3 and p4: row2 row2 = [p3, p4] # Create a gridplot using row1 and row2: layout layout = gridplot([row1, row2]) # Specify the name of the output_file and show the result output_file('grid.html') show(layout) #TABS # Import Panel from bokeh.models.widgets from bokeh.models.widgets import Panel # Create tab1 from plot p1: tab1 tab1 = Panel(child=p1, title='Latin America') # Create tab2 from plot p2: tab2 tab2 = Panel(child=p2, title='Africa') # Create tab3 from plot p3: tab3 tab3 = Panel(child=p3, title='Asia') # Create tab4 from plot p4: tab4 tab4 = Panel(child=p4, title='Europe') # Import Tabs from bokeh.models.widgets from bokeh.models.widgets import Tabs # Create a Tabs layout: layout layout = 
Tabs(tabs=[tab1, tab2, tab3, tab4]) # Specify the name of the output_file and show the result output_file('tabs.html') show(layout)
/Datacamp/bokeh_linked_plots.py
#Link the ranges with panning # Link the x_range of p2 to p1: p2.x_range p2.x_range = p1.x_range # Link the y_range of p2 to p1: p2.y_range p2.y_range = p1.y_range # Link the x_range of p3 to p1: p3.x_range p3.x_range = p1.x_range # Link the y_range of p4 to p1: p4.y_range p4.y_range = p1.y_range # Specify the name of the output_file and show the result output_file('linked_range.html') show(layout) #Link selection # Create ColumnDataSource: source source = ColumnDataSource(data) # Create the first figure: p1 p1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female literacy (% population)', tools='box_select,lasso_select') # Add a circle glyph to p1 p1.circle('fertility', 'female literacy', source=source) # Create the second figure: p2 p2 = figure(x_axis_label='fertility (children per woman)', y_axis_label='population (millions)', tools='box_select,lasso_select') # Ad p2.circle('fertility', 'population', source=source) # Create row layout of figures p1 and p2: layout layout = row(p1, p2) # Specify the name of the output_file and show the result output_file('linked_brush.html') show(layout)
/Datacamp/bokeh_numpy_pandas.py
# Import numpy as np import numpy as np # Create array using np.linspace: x x = np.linspace(0,5,100) # Create array using np.cos: y y = np.cos(x) # Add circles at x and y p.circle(x,y) # Specify the name of the output file and show the result output_file('numpy.html') show(p) #pandas # Import pandas as pd import pandas as pd # Read in the CSV file: df df = pd.read_csv('auto.csv') # Import figure from bokeh.plottin from bokeh.plotting import figure # Create the figure: p p = figure(x_axis_label='HP', y_axis_label='MPG') # Plot mpg vs hp by color p.circle( df['hp'], df['mpg'], color=df['color'], size=10) # Specify the name of the output file and show the result output_file('auto-df.html') show(p) #ColumnDataSource # Import the ColumnDataSource class from bokeh.plotting from bokeh.plotting import ColumnDataSource # Create a ColumnDataSource from df: source source = ColumnDataSource(df) # Add circle glyphs to the figure p p.circle('Year', 'Time', source=source, color='color',size=8) # Specify the name of the output file and show the result output_file('sprint.html') show(p)
/Datacamp/bokeh_tooltips.py
# Add the first circle glyph to the figure p p.circle('fertility', 'female_literacy', source=latin_america, size=10, color='red', legend='Latin America') # Add the second circle glyph to the figure p p.circle('fertility', 'female_literacy', source=africa, size=10, color='blue', legend='Africa') # Assign the legend to the bottom left: p.legend.location p.legend.location = 'bottom_left' # Fill the legend background with the color 'lightgray': p.legend.background_fill_color p.legend.background_fill_color='lightgray' # Specify the name of the output_file and show the result output_file('fert_lit_groups.html') show(p) # Import HoverTool from bokeh.models from bokeh.models import HoverTool # Create a HoverTool object: hover hover = HoverTool(tooltips=[('Country','@Country')]) # Add the HoverTool object to figure p p.add_tools(hover) # Specify the name of the output_file and show the result output_file('hover.html') show(p)
/Datacamp/bootstrapping.py
# Datacamp notes: bootstrap confidence intervals (rainfall / no-hitter data).
# NOTE(review): `np`, `plt`, `ecdf`, `bootstrap_replicate_1d`, `rainfall`
# and `nohitter_times` come from the course environment.

for i in range(50):
    # Generate bootstrap sample: bs_sample
    bs_sample = np.random.choice(rainfall, size=len(rainfall))

    # Compute and plot ECDF from bootstrap sample
    # BUG FIX: plt.plot() takes x and y positionally; x=/y= keyword
    # arguments are forwarded to Line2D, which has no such properties
    # and raises an error.
    x, y = ecdf(bs_sample)
    _ = plt.plot(x, y, marker='.', linestyle='none', color='gray', alpha=0.1)

# Compute and plot ECDF from original data (same fix as above)
x, y = ecdf(rainfall)
_ = plt.plot(x, y, marker='.')

# Make margins and label axes
plt.margins(0.02)
_ = plt.xlabel('yearly rainfall (mm)')
_ = plt.ylabel('ECDF')

# Show the plot
plt.show()


def draw_bs_reps(data, func, size=1):
    """Draw bootstrap replicates.

    Parameters
    ----------
    data : array-like
        Sample to resample from.
    func : callable
        Statistic applied to each bootstrap sample (e.g. np.mean).
    size : int, optional
        Number of replicates to draw (default 1).

    Returns
    -------
    ndarray of length `size` with one statistic per bootstrap sample.
    """
    # Initialize array of replicates: bs_replicates
    bs_replicates = np.empty(size)

    # Generate replicates
    for i in range(size):
        bs_replicates[i] = bootstrap_replicate_1d(data, func)  # applies func to one bootstrap sample

    return bs_replicates


# Take 10,000 bootstrap replicates of the mean: bs_replicates
bs_replicates = draw_bs_reps(rainfall, np.mean, 10000)

# Compute and print SEM (standard error of the mean)
sem = np.std(rainfall) / np.sqrt(len(rainfall))
print(sem)

# Compute and print standard deviation of bootstrap replicates
# (should approximate the SEM)
bs_std = np.std(bs_replicates)
print(bs_std)

# Make a histogram of the results
# NOTE(review): `normed=` was removed in matplotlib 3.x; use `density=`.
_ = plt.hist(bs_replicates, bins=50, normed=True)
_ = plt.xlabel('mean annual rainfall (mm)')
_ = plt.ylabel('PDF')

# Show the plot
plt.show()

# Draw bootstrap replicates of the mean no-hitter time (equal to tau): bs_replicates
bs_replicates = draw_bs_reps(nohitter_times, np.mean, 10000)

# Compute the 95% confidence interval: conf_int
conf_int = np.percentile(bs_replicates, [2.5, 97.5])

# Print the confidence interval
print('95% confidence interval =', conf_int, 'games')

# Plot the histogram of the replicates
_ = plt.hist(bs_replicates, bins=50, normed=True)
_ = plt.xlabel(r'$\tau$ (games)')
_ = plt.ylabel('PDF')

# Show the plot
plt.show()
/Datacamp/data-types.py
# Datacamp notes: fixing column dtypes on the `tips` dataset.

# Casting text columns to 'category' saves memory and enables
# category-aware analysis.
tips['sex'] = tips['sex'].astype('category')
tips['smoker'] = tips['smoker'].astype('category')

# Inspect the resulting dtypes and memory footprint.
print(tips.info())

# When a column holds values pandas cannot convert, errors='coerce'
# turns them into NaN instead of raising.
tips['total_bill'] = pd.to_numeric(tips['total_bill'], errors='coerce')
/Datacamp/data_explore.py
# Datacamp notes: first-look exploration of a freshly loaded DataFrame.
import pandas as pd

frame = pd.read_csv('....')

# Structure at a glance (bare expressions: interactive-session probes)
frame.head()
frame.tail()
frame.columns
frame.shape

# Summary statistics of the numeric columns
frame.describe()

# Frequency table of a categorical column, missing values included
frame['Borough'].value_counts(dropna=False)

# Non-null counts and means of selected columns
frame[['col1', 'col2']].count()
frame[['col1', 'col2']].mean()

# 5th and 95th percentiles of one column
frame['2015'].quantile([0.05, 0.95])

# Import matplotlib.pyplot
import matplotlib.pyplot as plt

# Histogram on log-log axes
frame['Existing Zoning Sqft'].plot(kind='hist', rot=70, logx=True, logy=True)
plt.show()

# Boxplot of initial_cost grouped by Borough
frame.boxplot(column='initial_cost', by='Borough', rot=90)
plt.show()
/Datacamp/dataframe_arithmetic.py
# Extract selected columns from weather as new DataFrame: temps_f temps_f = weather[['Min TemperatureF', 'Mean TemperatureF', 'Max TemperatureF']] # Convert temps_f to celsius: temps_c temps_c = (temps_f - 32) * 5/9 #broadcasting # Rename 'F' in column names with 'C': temps_c.columns temps_c.columns = ['Min TemperatureC', 'Mean TemperatureC', 'Max TemperatureC'] # Print first 5 rows of temps_c print(temps_c.head()) import pandas as pd # Read 'GDP.csv' into a DataFrame: gdp gdp = pd.read_csv('GDP.csv', index_col='DATE', parse_dates=True) # Slice all the gdp data from 2008 onward: post2008 post2008 = gdp.loc['2008':, :] # Print the last 8 rows of post2008 print(post2008.tail(8)) VALUE DATE 2014-07-01 17569.4 2014-10-01 17692.2 2015-01-01 17783.6 2015-04-01 17998.3 2015-07-01 18141.9 2015-10-01 18222.8 2016-01-01 18281.6 2016-04-01 18436.5 # Resample post2008 by year, keeping last(): yearly yearly = post2008.resample('A').last() # Print yearly print(yearly) VALUE DATE 2014-07-01 17569.4 2014-10-01 17692.2 2015-01-01 17783.6 2015-04-01 17998.3 2015-07-01 18141.9 2015-10-01 18222.8 2016-01-01 18281.6 2016-04-01 18436.5 # Compute percentage growth of yearly: yearly['growth'] yearly['growth'] = yearly.pct_change()*100 # Print yearly again print(yearly) VALUE growth DATE 2008-12-31 14549.9 NaN 2009-12-31 14566.5 0.114090 2010-12-31 15230.2 4.556345 2011-12-31 15785.3 3.644732 2012-12-31 16297.3 3.243524 2013-12-31 16999.9 4.311144 2014-12-31 17692.2 4.072377 2015-12-31 18222.8 2.999062 2016-12-31 18436.5 1.172707 # Import pandas import pandas as pd # Read 'sp500.csv' into a DataFrame: sp500 sp500 = pd.read_csv('sp500.csv', index_col='Date', parse_dates=True) # Read 'exchange.csv' into a DataFrame: exchange exchange = pd.read_csv('exchange.csv', index_col='Date', parse_dates=True) # Subset 'Open' & 'Close' columns from sp500: dollars dollars = sp500[['Open', 'Close']] # Print the head of dollars print(dollars.head()) # Convert dollars to pounds: pounds pounds = 
dollars.multiply(exchange['GBP/USD'], axis='rows') #NOTE: similar add(), subtract(), divide() methods. These offer more flexibility than using standard +, -, / operators # Print the head of pounds print(pounds.head())
/Datacamp/datetime_indices.py
# Datacamp notes: datetime indexing and reindexing in pandas.
# NOTE(review): `pd`, `date_list`, `temperature_list` and `ts0` come from
# the course environment.

# To read in with a parsed DatetimeIndex
# BUG FIX: the original read `index_col='Date)` -- the string literal was
# missing its closing quote, which is a SyntaxError.
df = pd.read_csv('data.csv', parse_dates=True, index_col='Date')

# Prepare a format string: time_format
time_format = '%Y-%m-%d %H:%M'

# Convert date_list into a datetime object: my_datetimes
my_datetimes = pd.to_datetime(date_list, format=time_format)

# Construct a pandas Series using temperature_list and my_datetimes: time_series
time_series = pd.Series(temperature_list, index=my_datetimes)

# Extract the hour from 9pm to 10pm on '2010-10-11': ts1
ts1 = ts0.loc['2010-10-11 21:00:00':'2010-10-11 22:00:00']

# Extract '2010-07-04' from ts0: ts2
ts2 = ts0.loc['2010-07-04']

# Extract data from '2010-12-15' to '2010-12-31': ts3
ts3 = ts0.loc['2010-12-15':'2010-12-31']

# Sometimes we may want to reindex a Series/DataFrame using the timeseries
# index of another; pandas fills non-matching indices with NaN values.

# Reindex without fill method: ts3
ts3 = ts2.reindex(ts1.index)

# Reindex with fill method, using forward fill: ts4
ts4 = ts2.reindex(ts1.index, method='ffill')
/Datacamp/grouby.py
# Datacamp notes: pandas groupby / agg recipes (transcript -- the blocks
# of pasted console output are NOT valid Python).

# Group titanic by 'pclass'
by_class = titanic.groupby('pclass')

# Aggregate 'survived' column of by_class by count
count_by_class = by_class['survived'].count()

# Print count_by_class
print(count_by_class)

# Group titanic by 'embarked' and 'pclass'
by_mult = titanic.groupby(['embarked', 'pclass'])

# Aggregate 'survived' column of by_mult by count
count_mult = by_mult['survived'].count()

# Print count_mult
print(count_mult)

# Read life_fname into a DataFrame: life
life = pd.read_csv(life_fname, index_col='Country')

# Read regions_fname into a DataFrame: regions
regions = pd.read_csv(regions_fname, index_col='Country')

# Group life by regions['region']: life_by_region. This is doable because of the same indexes
life_by_region = life.groupby(regions.region)

# Print the mean over the '2010' column of life_by_region
print(life_by_region['2010'].mean())

# Group titanic by 'pclass': by_class
by_class = titanic.groupby('pclass')

# Select 'age' and 'fare'
by_class_sub = by_class[['age','fare']]

# Aggregate by_class_sub by 'max' and 'median': aggregated
aggregated = by_class_sub.agg(['max', 'median'])

# Pasted output:
         age            fare
         max median       max   median
pclass
1       80.0   39.0  512.3292  60.0000
2       70.0   29.0   73.5000  15.0458
3       74.0   24.0   69.5500   8.0500

# Print the maximum age in each class
print(aggregated.loc[:, ('age','max')])

# Pasted output:
pclass
1    80.0
2    70.0
3    74.0
Name: (age, max), dtype: float64

# Print the median fare in each class
print(aggregated.loc[:, ('fare', 'median')])

# Pasted output:
# NOTE(review): this block repeats the (age, max) values -- it looks like a
# copy-paste slip in the transcript; per the table above the real medians
# are 60.0000 / 15.0458 / 8.0500.
pclass
1    80.0
2    70.0
3    74.0
Name: (age, max), dtype: float64

# Read the CSV file into a DataFrame and sort the index: gapminder
gapminder = pd.read_csv('gapminder.csv', index_col=['Year','region','Country']).sort_index()

# Group gapminder by 'Year' and 'region': by_year_region
by_year_region = gapminder.groupby(level = ['Year', 'region'])

# Define the function to compute spread: spread
def spread(series):
    """Return the range (max - min) of a Series."""
    return series.max() - series.min()

# Create the dictionary: aggregator (column name -> aggregation)
aggregator = {'population':'sum', 'child_mortality':'mean', 'gdp':spread}

# Aggregate by_year_region using the dictionary: aggregated
aggregated = by_year_region.agg(aggregator)

# Print the last 6 entries of aggregated
print(aggregated.tail(6))

# Read file: sales
sales = pd.read_csv('sales.csv', index_col='Date', parse_dates=True)

# Create a groupby object: by_day (grouped by weekday abbreviation)
by_day = sales.groupby(sales.index.strftime('%a'))

# Create sum: units_sum
units_sum = by_day['Units'].sum()

# Print units_sum
print(units_sum)
/Datacamp/hypothesis_testing_with_one_dataset.py
# Make an array of translated impact forces: translated_force_b translated_force_b = force_b - np.mean(force_b) + 0.55 # Take bootstrap replicates of Frog B's translated impact forces: bs_replicates bs_replicates = draw_bs_reps(translated_force_b, np.mean, 10000) # Compute fraction of replicates that are less than the observed Frog B force: p p = np.sum(bs_replicates <= np.mean(force_b)) / 10000 # Print the p-value print('p = ', p) # Compute mean of all forces: mean_force mean_force = np.mean(forces_concat) # Generate shifted arrays force_a_shifted = force_a - np.mean(force_a) + mean_force force_b_shifted = force_b - np.mean(force_b) + mean_force # Compute 10,000 bootstrap replicates from shifted arrays bs_replicates_a = draw_bs_reps(force_a_shifted, np.mean, 10000) bs_replicates_b = draw_bs_reps(force_b_shifted, np.mean, 10000) # Get replicates of difference of means: bs_replicates bs_replicates = bs_replicates_a-bs_replicates_b # Compute and print p-value: p p = np.sum(bs_replicates >= (np.mean(force_a)-np.mean(force_b))) / 10000 print('p-value =', p)
/Datacamp/idxmax_idxmin.py
# Create the pivot table: medals_won_by_country medals_won_by_country = medals.pivot_table(index = 'Edition', columns='NOC', values= "Athlete", aggfunc='count') # Slice medals_won_by_country: cold_war_usa_urs_medals cold_war_usa_urs_medals = medals_won_by_country.loc[1952:1988, ['USA','URS']] NOC USA URS Edition 1952 130.0 117.0 1956 118.0 169.0 1960 112.0 169.0 1964 150.0 174.0 1968 149.0 188.0 1972 155.0 211.0 1976 155.0 285.0 1980 NaN 442.0 1984 333.0 NaN 1988 193.0 294.0 # If .max() returns the maximum value of Series or 1D array, .idxmax() returns the index of the maximizing element. # Create most_medals most_medals = cold_war_usa_urs_medals.idxmax(axis='columns') Edition 1952 USA 1956 URS 1960 URS 1964 URS 1968 URS 1972 URS 1976 URS 1980 URS 1984 USA 1988 URS dtype: object # Print most_medals.value_counts() print(most_medals.value_counts()) In [5]: cold_war_usa_urs_medals.idxmax() Out[5]: NOC USA 1984 URS 1980 dtype: int64
/Datacamp/indexing.py
#indexing as: df[['...', '....']] #returns a DataFrame p_counties = election.loc['Perry':'Potter', :] # Slice the row labels 'Potter' to 'Perry' in reverse order: p_counties_rev p_counties_rev = election.loc['Potter':'Perry':-1, :] # Slice the columns from the starting column to 'Obama': left_columns left_columns = election.loc[:, :'Obama'] # Print the output of left_columns.head() print(left_columns.head()) # Slice the columns from 'Obama' to 'winner': middle_columns middle_columns = election.loc[:, 'Obama':'winner'] # Print the output of middle_columns.head() print(middle_columns.head()) # Slice the columns from 'Romney' to the end: 'right_columns' right_columns = election.loc[:, 'Romney':] #inddexes are immutables, therefore to change it the whole index needs to be overwritten; # Create the list of new indexes: new_idx new_idx = [ind.upper() for ind in sales.index] # Assign new_idx to sales.index sales.index = new_idx # Assign the string 'MONTHS' to sales.index.name sales.index.name = 'MONTHS' # Print the sales DataFrame print(sales) # Assign the string 'PRODUCTS' to sales.columns.name sales.columns.name = 'PRODUCTS' # Print the sales dataframe again print(sales)
/Datacamp/manipulating_indices.py
# Import pandas import pandas as pd # Read 'monthly_max_temp.csv' into a DataFrame: weather1 weather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month') # Print the head of weather1 print(weather1.head()) # Sort the index of weather1 in alphabetical order: weather2 weather2 = weather1.sort_index() # Print the head of weather2 print(weather2.head()) # Sort the index of weather1 in reverse alphabetical order: weather3 weather3 = weather1.sort_index(ascending=False) # Print the head of weather3 print(weather3.head()) # Sort weather1 numerically using the values of 'Max TemperatureF': weather4 weather4 = weather1.sort_values('Max TemperatureF') # Print the head of weather4 print(weather4.head()) # Import pandas import pandas as pd # Reindex weather1 using the list year: weather2 weather2 = weather1.reindex(year) # Print weather2 print(weather2) # Reindex weather1 using the list year with forward-fill: weather3 weather3 = weather1.reindex(year).ffill() # Print weather3 print(weather3) Mean TemperatureF Month Jan 32.133333 Feb NaN Mar NaN Apr 61.956044 May NaN Jun NaN Jul 68.934783 Aug NaN Sep NaN Oct 43.434783 Nov NaN Dec NaN Mean TemperatureF Month Jan 32.133333 Feb 32.133333 Mar 32.133333 Apr 61.956044 May 61.956044 Jun 61.956044 Jul 68.934783 Aug 68.934783 Sep 68.934783 Oct 43.434783 Nov 43.434783 Dec 43.434783 # Import pandas import pandas as pd # Reindex names_1981 with index of names_1881: common_names common_names = names_1981.reindex(names_1881.index) # Print shape of common_names print(common_names.shape) # Drop rows with null counts: common_names common_names = common_names.dropna() # Print shape of new common_names print(common_names.shape)
/Datacamp/melting.py
#melting restores pivoted dfs visitors = pd.melt(visitors_by_city_weekday, id_vars=['weekday'], value_name='visitors') #id_vars specify columns to maintain #value_names specify name of column containing the values # Set the new index: users_idx users_idx = users.set_index(['city', 'weekday']) # Print the users_idx DataFrame print(users_idx) visitors signups city weekday Austin Sun 139 7 Dallas Sun 237 12 Austin Mon 326 3 Dallas Mon 456 5 # Obtain the key-value pairs: kv_pairs kv_pairs = pd.melt(users_idx, col_level=0) # Print the key-value pairs print(kv_pairs) variable value 0 visitors 139 1 visitors 237 2 visitors 326 3 visitors 456 4 signups 7 5 signups 12 6 signups 3 7 signups 5
/Datacamp/merge_ordered.py
#Used for mereging when there is an ordering (eg dates) # Perform the first ordered merge: tx_weather tx_weather = pd.merge_ordered(austin, houston) # Print tx_weather print(tx_weather) # Perform the second ordered merge: tx_weather_suff tx_weather_suff = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus']) # Print tx_weather_suff print(tx_weather_suff) # Perform the third ordered merge: tx_weather_ffill tx_weather_ffill = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'], fill_method='ffill') # Print tx_weather_ffill print(tx_weather_ffill) #Similar to pd.merge_ordered(), the pd.merge_asof() function will also merge #values in order using the on column, but for each row in the left DataFrame, #only rows from the right DataFrame whose 'on' column values are less than the #left value will be kept. #This function can be used to align disparate datetime frequencies without having to first resample. oil.head() Date Price 0 1970-01-01 3.35 1 1970-02-01 3.35 2 1970-03-01 3.35 3 1970-04-01 3.35 4 1970-05-01 3.35 auto.head() mpg cyl displ hp weight accel yr origin \ 0 18.0 8 307.0 130 3504 12.0 1970-01-01 US 1 15.0 8 350.0 165 3693 11.5 1970-01-01 US 2 18.0 8 318.0 150 3436 11.0 1970-01-01 US 3 16.0 8 304.0 150 3433 12.0 1970-01-01 US 4 17.0 8 302.0 140 3449 10.5 1970-01-01 US name 0 chevrolet chevelle malibu 1 buick skylark 320 2 plymouth satellite 3 amc rebel sst 4 ford torino # Merge auto and oil: merged merged = pd.merge_asof(auto, oil, left_on='yr', right_on='Date') # Print the tail of merged print(merged.tail()) mpg cyl displ hp weight accel yr origin name \ 387 27.0 4 140.0 86 2790 15.6 1982-01-01 US ford mustang gl 388 44.0 4 97.0 52 2130 24.6 1982-01-01 Europe vw pickup 389 32.0 4 135.0 84 2295 11.6 1982-01-01 US dodge rampage 390 28.0 4 120.0 79 2625 18.6 1982-01-01 US ford ranger 391 31.0 4 119.0 82 2720 19.4 1982-01-01 US chevy s-10 Date Price 387 1982-01-01 33.85 388 1982-01-01 33.85 389 1982-01-01 33.85 390 1982-01-01 
33.85 391 1982-01-01 33.85 # Resample merged: yearly yearly = merged.resample('A', on='Date')[['mpg','Price']].mean() # Print yearly print(yearly) mpg Price Date 1970-12-31 17.689655 3.35 1971-12-31 21.111111 3.56 1972-12-31 18.714286 3.56 1973-12-31 17.100000 3.56 1974-12-31 22.769231 10.11 1975-12-31 20.266667 11.16 1976-12-31 21.573529 11.16 1977-12-31 23.375000 13.90 1978-12-31 24.061111 14.85 1979-12-31 25.093103 14.85 1980-12-31 33.803704 32.50 1981-12-31 30.185714 38.00 1982-12-31 32.000000 33.85 # print yearly.corr() print(yearly.corr()) mpg Price Date 1970-12-31 17.689655 3.35 1971-12-31 21.111111 3.56 1972-12-31 18.714286 3.56 1973-12-31 17.100000 3.56 1974-12-31 22.769231 10.11 1975-12-31 20.266667 11.16 1976-12-31 21.573529 11.16 1977-12-31 23.375000 13.90 1978-12-31 24.061111 14.85 1979-12-31 25.093103 14.85 1980-12-31 33.803704 32.50 1981-12-31 30.185714 38.00 1982-12-31 32.000000 33.85
/Datacamp/merging.py
# Merge revenue with managers on 'city': merge_by_city merge_by_city = pd.merge(revenue, managers, on='city') # Print merge_by_city print(merge_by_city) # Merge revenue with managers on 'branch_id': merge_by_id merge_by_id = pd.merge(revenue, managers, on='branch_id') # Print merge_by_id print(merge_by_id) # Add 'state' column to revenue: revenue['state'] revenue['state'] = ['TX','CO','IL','CA'] # Add 'state' column to managers: managers['state'] managers['state'] = ['TX','CO','CA', 'MO'] # Merge revenue & managers on 'branch_id', 'city', & 'state': combined combined = pd.merge(revenue, managers, on=['branch_id', 'city','state']) # Print combined print(combined) #matching columns are suffixed with _x, _y. This can be changed with 'suffixes = [..., ...]' arg o2o = pd.merge(left=site, right=visited, left_on='name', right_on='site') #This will handle 1-to-1, many-to-1 and many-to-many merges # Merge revenue and sales: revenue_and_sales revenue_and_sales = pd.merge(revenue, sales, how='right',on=['city', 'state']) # Print revenue_and_sales print(revenue_and_sales) # Merge sales and managers: sales_and_managers sales_and_managers = pd.merge(sales, managers, how='left',left_on=['city', 'state'], right_on=['branch', 'state']) # Print sales_and_managers print(sales_and_managers)
/Datacamp/multi_indexing.py
#Sometimes we may want multiple row indexes in a heirachical order # Set the index to be the columns ['state', 'month']: sales sales = sales.set_index(['state', 'month']) # Sort the MultiIndex: sales sales = sales.sort_index() sales = eggs salt spam state month CA 1 47 12.0 17 2 110 50.0 31 NY 1 221 89.0 72 2 77 87.0 20 TX 1 132 NaN 52 2 205 60.0 55 # Look up data for NY in month 1: NY_month1 NY_month1 = sales.loc[('NY', 1)] # Look up data for CA and TX in month 2: CA_TX_month2 CA_TX_month2 = sales.loc[(['CA', 'TX'], 2),:] # Look up data for all states in month 2: all_month2 all_month2 = sales.loc[(slice(None), 2),:]
/Datacamp/pandas.py
#Dataframes are made up of Series objects. Each Series is labelled 1D numpy array import pandas as pd #df is some DataFrame df.head() df.tail() df.iloc[1, :] df.loc['row_index', :] #to return column info df.info() #to convert DataFrame to numpy array: df.values #note though that many numpy methods work on pandas dfs ######## #creating Dataframes from scratch ######## d = {"col1" :[1,3,4,5], "col2" : [4,5,6,7]} df = pd.DataFrame(d) col1 = [1, 3, 5, 6] col2 = [6, 7, 8, 9] cols = [col1, col2] indices = ["col1", "col2"] d = zip(indices, cols) d = dict(list(d)) df = pd.DataFramed df.columns = ["newcol1", "newcol2"] #Broadcasting df['col3'] = "M" d = {"col1" : [1, 3, 4, 5], "col2" : "M"} df = pd.DataFrame(d) #Broadcasts col2
/Datacamp/parameter_optimisation.py
# Seed random number generator np.random.seed(42) # Compute mean no-hitter time: tau tau = np.mean(nohitter_times) # Draw out of an exponential distribution with parameter tau: inter_nohitter_time inter_nohitter_time = np.random.exponential(tau, 100000) # Plot the PDF and label axes _ = plt.hist(inter_nohitter_time, bins=50, normed=True, histtype='step') _ = plt.xlabel('Games between no-hitters') _ = plt.ylabel('PDF') # Show the plot plt.show() #Verigy using cdf # Create an ECDF from real data: x, y x, y = ecdf(nohitter_times) # Create a CDF from theoretical samples: x_theor, y_theor x_theor, y_theor = ecdf(inter_nohitter_time) # Overlay the plots plt.plot(x=x_theor, y=y_theor) plt.plot(x=x, y=y, marker='.', linestyle='none') # Margins and axis labels plt.margins(0.02) plt.xlabel('Games between no-hitters') plt.ylabel('CDF') # Show the plot plt.show() # Plot the theoretical CDFs plt.plot(x_theor, y_theor) plt.plot(x, y, marker='.', linestyle='none') plt.margins(0.02) plt.xlabel('Games between no-hitters') plt.ylabel('CDF') # Take samples with half tau: samples_half samples_half = np.random.exponential(tau/2,10000) # Take samples with double tau: samples_double samples_double = np.random.exponential(2*tau,10000) # Generate CDFs from these samples x_half, y_half = ecdf(samples_half) x_double, y_double = ecdf(samples_double) # Plot these CDFs as lines _ = plt.plot(x_half, y_half) _ = plt.plot(x_double, y_double) # Show the plot plt.show()
/Datacamp/pivit_tables.py
#pivot tables aggregate data with duplicate indices weekday city visitors signups 0 Sun Austin 139 7 1 Sun Dallas 237 12 2 Mon Austin 326 3 3 Mon Dallas 456 5 # Create the DataFrame with the appropriate pivot table: by_city_day by_city_day = users.pivot_table(index = 'weekday', columns = "city") # Print by_city_day print(by_city_day) signups visitors city Austin Dallas Austin Dallas weekday Mon 3 5 326 456 Sun 7 12 139 237 # Use a pivot table to display the count of each column: count_by_weekday1 count_by_weekday1 = users.pivot_table(index='weekday', aggfunc='count') # Print count_by_weekday print(count_by_weekday1) city signups visitors weekday Mon 2 2 2 Sun 2 2 2 # Replace 'aggfunc='count'' with 'aggfunc=len': count_by_weekday2 count_by_weekday2 = users.pivot_table(index='weekday', aggfunc=len) # Create the DataFrame with the appropriate pivot table: signups_and_visitors signups_and_visitors = users.pivot_table(index = "weekday", aggfunc=sum) # Print signups_and_visitors print(signups_and_visitors) signups visitors weekday Mon 8 782 Sun 19 376 # Add in the margins: signups_and_visitors_total signups_and_visitors_total = users.pivot_table(index = "weekday", aggfunc=sum, margins=True) # Print signups_and_visitors_total print(signups_and_visitors_total) signups visitors weekday Mon 8 782 Sun 19 376 All 27 1158
/Datacamp/pivoting_tables.py
#EG: id treatment gender response 0 1 A F 5 1 2 A M 3 2 3 B F 8 3 4 B M 9 df.pivot(index = "treatment", columns = "gender", values = "response") #pivot gender F M treatment A 5 3 B 8 9 #Not specifying the values will pivot all columns
/Datacamp/readin_and_cleaning.py
# Read in the data file with header=None: df_headers df_headers = pd.read_csv(data_file, header=None) # Print the output of df_headers.head() print(df_headers.head()) # Split on the comma to create a list: column_labels_list column_labels_list = column_labels.split(",") # Assign the new column labels to the DataFrame: df.columns df.columns = column_labels_list # Remove the appropriate columns: df_dropped df_dropped = df.drop(list_to_drop, axis = 'columns') # Print the output of df_dropped.head() print(df_dropped.head()) # Convert the date column to string: df_dropped['date'] df_dropped['date'] = df_dropped['date'].astype(str) # Pad leading zeros to the Time column: df_dropped['Time'] df_dropped['Time'] = df_dropped['Time'].apply(lambda x:'{:0>4}'.format(x)) # Concatenate the new date and Time columns: date_string date_string = df_dropped.date + df_dropped.Time # Convert the date_string Series to datetime: date_times date_times = pd.to_datetime(date_string, format='%Y%m%d%H%M') # Set the index to be the new date_times container: df_clean df_clean = df_dropped.set_index(date_times) # Print the dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011 print(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren']) # Convert the dry_bulb_faren column to numeric values: df_clean['dry_bulb_faren'] df_clean['dry_bulb_faren'] = pd.to_numeric(df_clean['dry_bulb_faren'], errors='coerce') # Print the transformed dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011 print(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren']) # Convert the wind_speed and dew_point_faren columns to numeric values df_clean['wind_speed'] = pd.to_numeric(df_clean['wind_speed'], errors='coerce') df_clean['dew_point_faren'] = pd.to_numeric(df_clean['dew_point_faren'], errors='coerce')
/Datacamp/resampling.py
#If a df is indexed by date-time, we can perform resampling. #Downsampling is when we go to a lower unit, lower unit being one with fewer units in a period (lowere frequency) #Downsample from hours to days #Upsampling is the opposite and will introduce Nana, unless otherwise catered for through filling methods # Downsample to 6 hour data and aggregate by mean: df1 df1 = df.Temperature.resample('6h').mean() # Downsample to daily data and count the number of data points: df2 df2 = df.Temperature.resample('D').count() # Extract temperature data for August: august august = df.Temperature.loc['2010-08'] # Downsample to obtain only the daily highest temperatures in August: august_highs august_highs = august.resample('D').max() # Extract temperature data for February: february february = df.Temperature.loc['2010-02'] # Downsample to obtain the daily lowest temperatures in February: february_lows february_lows = february.resample('D').min() # Extract data from 2010-Aug-01 to 2010-Aug-15: unsmoothed unsmoothed = df['Temperature']['2010-Aug-01':'2010-Aug-15'] # Apply a rolling mean with a 24 hour window: smoothed smoothed = unsmoothed.rolling(window=24).mean() # Create a new DataFrame with columns smoothed and unsmoothed: august august = pd.DataFrame({'smoothed':smoothed, 'unsmoothed':unsmoothed}) # Plot both smoothed and unsmoothed data using august.plot(). august.plot() plt.show()
/Datacamp/seaborn.py
# Import plotting modules import matplotlib.pyplot as plt import seaborn as sns # Plot a linear regression between 'weight' and 'hp' sns.lmplot(x='weight', y='hp', data=auto) # Display the plot plt.show() #RESIDUALS # Import plotting modules import matplotlib.pyplot as plt import seaborn as sns # Generate a green residual plot of the regression between 'hp' and 'mpg' sns.residplot(x='hp', y='mpg', data=auto, color='green') # Display the plot plt.show() #HIGHER ORDER # Generate a scatter plot of 'weight' and 'mpg' using red circles plt.scatter(auto['weight'], auto['mpg'], label='data', color='red', marker='o') # Plot in blue a linear regression of order 1 between 'weight' and 'mpg' sns.regplot(x='weight', y='mpg', data=auto, label='order 1', color='blue', order=1, scatter=None) # Plot in green a linear regression of order 2 between 'weight' and 'mpg' sns.regplot(x='weight', y='mpg', data=auto, label='order 2', color='green', order=2, scatter=None) # Add a legend and display the plot plt.legend(loc='upper right') plt.show() # Plot a linear regression between 'weight' and 'hp', with a hue (specifies categories) of 'origin' and palette of 'Set1' sns.lmplot('weight', 'hp', data=auto, hue='origin', palette='Set1') # Display the plot plt.show() # Plot linear regressions between 'weight' and 'hp' grouped row-wise by 'origin' sns.lmplot('weight', 'hp', data=auto, row='origin') # Display the plot plt.show()
/Datacamp/seaborn_multivariate.py
kind='scatter' uses a scatter plot of the data points kind='reg' uses a regression plot (default order 1) kind='resid' uses a residual plot kind='kde' uses a kernel density estimate of the joint distribution kind='hex' uses a hexbin plot of the joint distribution # Generate a joint plot of 'hp' and 'mpg' sns.jointplot(x='hp', y='mpg', data=auto) # Generate a joint plot of 'hp' and 'mpg' using a hexbin plot sns.jointplot(x='hp', y='mpg', data=auto, kind='hex') # Display the plot plt.show() #Plot of all numeric columns against one another # Print the first 5 rows of the DataFrame print(auto.head()) # Plot the pairwise joint distributions from the DataFrame sns.pairplot(auto, hue='origin', kind='reg') # Display the plot plt.show() # Print the covariance matrix print(cov_matrix) # Visualize the covariance matrix using a heatmap sns.heatmap(cov_matrix) # Display the heatmap plt.show()
/Datacamp/sqlalchemy.py
# Import create_engine from sqlalchemy import create_engine # Create an engine that connects to the census.sqlite file: engine engine = create_engine('sqlite:///census.sqlite')# Create an engine to the census database engine = create_engine('mysql+pymysql://'+'student:datacamp'+'@courses.csrrinzqubik.us-east-1.rds.amazonaws.com:3306/'+'census') # Print table names print(engine.table_names()) #Reflection is the process of reading the database and building the metadata #based on that information. It's the opposite of creating a Table by hand and #is very useful for working with existing databases. To perform reflection, you need to import #the Table object from the SQLAlchemy package. Then, you use this Table object to read #your table from the engine and autoload the columns. Using the Table object in this manner #is a lot like passing arguments to a function. For example, to autoload the columns with the engine, #you have to specify the keyword arguments autoload=True and autoload_with=engine to Table(). # Import Table from sqlalchemy import Table, MetaData metadata = MetaData() # Reflect census table from the engine: census census = Table('census', metadata, autoload=True, autoload_with=engine) # Print the column names print(census.columns.keys()) # Print full table metadata print(repr(metadata.tables['census'])) # Print census table metadata print(repr(census))
/Datacamp/sqlalchemy_grouping_labeling.py
# Build a query to count the distinct states values: stmt stmt = select([func.count(census.columns.state.distinct())]) # Execute the query and store the scalar result: distinct_state_count distinct_state_count = connection.execute(stmt).scalar() # Print the distinct_state_count print(distinct_state_count) # Import func from sqlalchemy import func # Build a query to select the state and count of ages by state: stmt stmt = select([census.columns.state, func.count(census.columns.age)]) # Group stmt by state stmt = stmt.group_by(census.columns.state) # Execute the statement and store all the records: results results = connection.execute(stmt).fetchall() # Print results print(results) # Print the keys/column names of the results returned print(results[0].keys()) # Import func from sqlalchemy import func # Build an expression to calculate the sum of pop2008 labeled as population pop2008_sum = func.sum(census.columns.pop2008).label('population') # Build a query to select the state and sum of pop2008: stmt stmt = select([census.columns.state, pop2008_sum]) # Group stmt by state stmt = stmt.group_by(census.columns.state) # Execute the statement and store all the records: results results = connection.execute(stmt).fetchall() # Print results print(results) # Print the keys/column names of the results returned print(results[0].keys())
/Datacamp/sqlalchemy_joins.py
#IF a table has an already defined relationship: # Build a statement to join census and state_fact tables: stmt stmt = select([census.columns.pop2000, state_fact.columns.abbreviation]) # Execute the statement and get the first result: result result = connection.execute(stmt).first() # Loop over the keys in the result object and print the key and value for key in result.keys(): print(key, getattr(result, key)) # Build a statement to select the census and state_fact tables: stmt stmt = select([census, state_fact]) # Add a select_from clause that wraps a join for the census and state_fact # tables where the census state column and state_fact name column match stmt = stmt.select_from( census.join(state_fact, census.columns.state == state_fact.columns.name)) # Execute the statement and get the first result: result result = connection.execute(stmt).first() # Loop over the keys in the result object and print the key and value for key in result.keys(): print(key, getattr(result, key)) # Build a statement to select the state, sum of 2008 population and census # division name: stmt stmt = select([ census.columns.state, func.sum(census.columns.pop2008), state_fact.columns.census_division_name ]) # Append select_from to join the census and state_fact tables by the census state and state_fact name columns stmt = stmt.select_from( census.join(state_fact, census.columns.state == state_fact.columns.name) ) # Append a group by for the state_fact name column stmt = stmt.group_by(state_fact.columns.name) # Execute the statement and get the results: results results = connection.execute(stmt).fetchall()
/Datacamp/sqlalchemy_more_statements.py
# Create a select query: stmt stmt = select([census]) # Add a where clause to filter the results to only those for New York stmt = stmt.where(census.columns.state == 'New York') # Execute the query to retrieve all the data returned: results results = connection.execute(stmt).fetchall() # Loop over the results and print the age, sex, and pop2008 for result in results: print(result.age, result.sex, result.pop2008) # Create a query for the census table: stmt stmt = select([census]) # Append a where clause to match all the states in_ the list states stmt = stmt.where(census.columns.state.in_(states)) # Loop over the ResultProxy and print the state and its population in 2000 for i in connection.execute(stmt).fetchall(): print(i.state, i.pop2000) # Import and_ from sqlalchemy import and_ # Build a query for the census table: stmt stmt = select([census]) # Append a where clause to select only non-male records from California using and_ stmt = stmt.where( # The state of California with a non-male sex and_(census.columns.state == 'California', census.columns.sex != 'M' ) ) # Loop over the ResultProxy printing the age and sex for result in connection.execute(stmt).fetchall(): print(result.age, result.sex) # Build a query to select the state column: stmt stmt = select([census.columns.state]) # Order stmt by the state column stmt = stmt.order_by(census.columns.state) #desc(census.columns.state) ## Build a query to select state and age: stmt #stmt = select([census.columns.state, census.columns.age]) # ## Append order by to ascend by state and descend by age #stmt = stmt.order_by(census.columns.state, desc(census.columns.age)) # Execute the query and store the results: results results = connection.execute(stmt).fetchall() # Print the first 10 results print(results[:10])
/Datacamp/sqlalchemy_statements.py
# Import create_engine from sqlalchemy import create_engine # Create an engine that connects to the census.sqlite file: engine engine = create_engine('sqlite:///census.sqlite') connection = engine.connect() # Build select statement for census table: stmt stmt = "SELECT * FROM census" # Execute the statement and fetch the results: results results = connection.execute(stmt).fetchall() # Print results print(results) #ALTERNATIVELY # Import select from sqlalchemy import select # Reflect census table via engine: census census = Table('census', metadata, autoload=True, autoload_with=engine) # Build select statement for census table: stmt stmt = select([census]) # Print the emitted statement to see the SQL emitted print(stmt) # Execute the statement and print the results print(connection.execute(stmt).fetchall()) # #Recall the differences between a ResultProxy and a ResultSet: # # ResultProxy: The object returned by the .execute() method. It can be used in a variety of ways to get the data returned by the query. # ResultSet: The actual data asked for in the query when using a fetch method such as .fetchall() on a ResultProxy. #This separation between the ResultSet and ResultProxy allows us to fetch as much or as little data as we desire. results = connection.execute(stmt).fetchall() # Get the first row of the results by using an index: first_row first_row = results[0] # Print the first row of the results print(first_row) # Print the first column of the first row by using an index print(first_row[0]) # Print the 'state' column of the first row by using its name print(first_row['state'])
/Datacamp/stack_unstack.py
#stack does something similar to pivot using the indices # Unstack users by 'weekday': byweekday users = visitors signups city weekday Austin Mon 326 3 Sun 139 7 Dallas Mon 456 5 Sun 237 12 byweekday = users.unstack(level = 'weekday') # Print the byweekday DataFrame print(byweekday) visitors signups weekday Mon Sun Mon Sun city Austin 326 139 3 7 Dallas 456 237 5 12 # Stack byweekday by 'weekday' and print it print(byweekday.stack(level = 'weekday')) visitors signups city weekday Austin Mon 326 3 Sun 139 7 Dallas Mon 456 5 Sun 237 12 # Stack 'city' back into the index of bycity: newusers newusers = bycity.stack(level = "city") # Swap the levels of the index of newusers: newusers newusers = newusers.swaplevel(0,1) # Print newusers and verify that the index is not sorted print(newusers) # Sort the index of newusers: newusers newusers = newusers.sort_index()
/Datacamp/tidy_data.py
#Melting data is the process of turning columns of your data into rows of data. airquality_melt = pd.melt(airquality_melt, id_vars=['Month', 'Day']) #id_vars = columns not wishing to melt #value_vars = columns wishing to melt (deafult to all not in id_vars) #Pivoting data is the opposite of melting it. airquality_pivot = airquality_melt.pivot_table(index=["Month", "Day"], columns="measurement", values="reading") #columns="measurement" : columns to pivot #values="reading" : values to fill columns with #the above create a heirarchical header format. To fix this: airquality_pivot_reset = airquality_pivot.reset_index() #Often there are duplicate values, these can be handled as follows: airquality_pivot = airquality_dup.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading', aggfunc=np.mean) #where the mean is taken #Note in the below that Series atributes and functions are accessed on the .str function # Melt ebola: ebola_melt ebola_melt = pd.melt(ebola, id_vars=['Date', 'Day'], var_name='type_country', value_name='counts') # Create the 'str_split' column ebola_melt['str_split'] = ebola_melt.type_country.str.split('_') # Create the 'type' column ebola_melt['type'] = ebola_melt.str_split.str.get(0) # Create the 'country' column ebola_melt['country'] = ebola_melt.str_split.str.get(1) # Print the head of ebola_melt print(ebola_melt.head())
/Datacamp/twitter_example.py
# Import package import tweepy # Store OAuth authentication credentials in relevant variables access_token = "1092294848-aHN7DcRP9B4VMTQIhwqOYiB14YkW92fFO8k8EPy" access_token_secret = "X4dHmhPfaksHcQ7SCbmZa2oYBBVSD2g8uIHXsp5CTaksx" consumer_key = "nZ6EA0FxZ293SxGNg8g8aP0HM" consumer_secret = "fJGEodwe3KiKUnsYJC3VRndj7jevVvXbK2D5EiJ2nehafRgA6i" # Pass OAuth details to tweepy's OAuth handler auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) #################################################### #Need to define a Stream Listener class class MyStreamListener(tweepy.StreamListener): def __init__(self, api=None): super(MyStreamListener, self).__init__() self.num_tweets = 0 self.file = open("tweets.txt", "w") def on_status(self, status): tweet = status._json self.file.write( json.dumps(tweet) + '\n' ) self.num_tweets += 1 if self.num_tweets < 100: return True else: return False self.file.close() def on_error(self, status): print(status) ##################################################### # Initialize Stream listener l = MyStreamListener() # Create your Stream object with authentication stream = tweepy.Stream(auth, l) # Filter Twitter Streams to capture data by the keywords: stream.filter(track = ['clinton', 'trump', 'sanders', 'cruz']) #Once the twitter data is sitting locally: # Import package import json # String of path to file: tweets_data_path tweets_data_path = "tweets.txt" # Initialize empty list to store tweets: tweets_data tweets_data = [] # Open connection to file tweets_file = open(tweets_data_path, "r") # Read in tweets and store in list: tweets_data for line in tweets_file: tweet = json.loads(line) tweets_data.append(tweet) # Close connection to file tweets_file.close() # Import package import pandas as pd # Build DataFrame of tweet texts and languages df = pd.DataFrame(tweets_data, columns=['text', 'lang']) # Print head of DataFrame print(df.head())
/Datacamp/web_import.py
# Import package from urllib.request import urlretrieve # Import pandas import pandas as pd # Assign url of file: url url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv' # Save file locally urlretrieve(url, 'winequality-red.csv') # Read file into a DataFrame and print its head df = pd.read_csv('winequality-red.csv', sep=';') #Alternatively df = pd.read_csv(url, sep = ";") #does not save the file locally #If file is an excel file xl = pd.read_excel(url, sheetname = None) # Print the sheetnames to the shell print(xl.keys()) # Print the head of the first sheet (using its name, NOT its index) print(xl['1700'].head()) ##HTTP requests # Import packages from urllib.request import urlopen, Request # Specify the url url = "http://www.datacamp.com/teach/documentation" # This packages the request: request request = Request(url) # Sends the request and catches the response: response response = urlopen(request) # Print the datatype of response print(type(response)) # Extract the response: html html = response.read() # Be polite and close the response! response.close() #The requests package simplifies this: # Import package import requests # Specify the url: url url = "http://www.datacamp.com/teach/documentation" # Packages the request, send the request and catch the response: r r = requests.get(url) # Extract the response: text text = r.text #NO NEED TO CLOSE # Print the html print(text)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
KagenLH/forme-app
refs/heads/main
{"/app/api/form_routes.py": ["/app/models/form.py", "/app/models/field.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/seeds/fields.py": ["/app/models/form.py", "/app/models/field.py"], "/app/seeds/forms.py": ["/app/models/form.py"]}
└── ├── app │ ├── api │ │ ├── field_routes.py │ │ └── form_routes.py │ ├── forms │ │ └── signup_form.py │ ├── models │ │ ├── __init__.py │ │ ├── field.py │ │ └── form.py │ └── seeds │ ├── fields.py │ ├── forms.py │ └── users.py └── migrations └── versions ├── 20210816_135552_.py ├── 20210820_100009_.py ├── 20210820_100524_.py ├── 20210820_103408_.py ├── 20210820_133516_.py ├── 20210820_171546_.py ├── 20210821_113310_.py ├── 20210821_145311_.py └── 20210821_161057_.py
/app/api/field_routes.py
# from flask import Blueprint, jsonify, request # from flask_login import login_required # from app.models import Field, db # field_routes = Blueprint('fields', __name__) # @field_routes.route('/', methods=['POST']) # def fields(): # if request.method == 'POST': # # get fields data from request body # data = request.get_json() # form_fields = [] # for field_info in data: # field = Field( # type=field_info["type"], # label=field_info["label"], # max_length=field_info["max_length"], # required=field_info["required"], # placeholder=field_info["placeholder"], # instructions=field_info["instructions"], # choices=field_info["choices"], # form_id=field_info["form_id"] # ) # # db.session.add(field) # form_fields.append(field) # # adds each instance individually, so list format is ok # db.session.add_all(form_fields) # db.session.commit() # # must return dictionary, tuple, or string # return {"fields": [field.to_dict for field in form_fields]} # @field_routes.route('/forms/<int:id>') # def form_fields(id): # fields = Field.query.filter_by(form_id=id).all() # return {'fields': [field.to_dict for field in fields]}
/app/api/form_routes.py
from flask import Blueprint, jsonify, request, session from flask_login import login_required, current_user from app.models import Form, db, Field form_routes = Blueprint("forms", __name__) # get all forms --- remove this route? @form_routes.route('/') # @login_required def get_forms(): forms = Form.query.all() # original query for ALL forms return {'forms': [form.to_dict() for form in forms]} @form_routes.route('/<int:id>', methods=['GET', 'DELETE']) @login_required def forms(id): # get a specific form by primary key if request.method == 'GET': form = Form.query.get(id) return form.to_dict() # delete a specific form by primary key elif request.method == 'DELETE': form = Form.query.get(id) # takes a form's id db.session.delete(form) db.session.commit() return form.to_dict() # (GET) allow user to access a form without being logged in, i.e. SHARED form # @form_routes.route('/<int:id>/shared') # def shared_form(id): # form = Form.query.get(id) # return form.to_dict() # get forms by owner_id (i.e. 
all forms owned by a specific user) @form_routes.route('/users/<int:id>') def user_forms(id): # takes a user's id forms = Form.query.filter_by(owner_id=id).all() # destructure in forms store return {'forms': [form.to_dict() for form in forms]} @form_routes.route('/build', methods=['POST']) @login_required def create_form(): # print('***** REQUEST DATA INFO *****', request.get_json()) user_id = session['_user_id'] # pull JSON data from request body data = request.get_json() form_fields = [] form = Form( title=data["title"], owner_id=user_id, description=data["description"], label_placement=data["labelPlacement"], description_align=data["descriptionAlignment"], title_align=data["titleAlignment"], ) db.session.add(form) db.session.commit() # print('FORM FORM FORM:', form) for field_info in data["fields"]: # all of the columns in the fields table (except id) expected_keys = [ "type", "label", "maxLength", "required", "placeholder", "instructions", "choices" ] # check whether field_info["maxLength"] exists if "maxLength" in field_info: # convert the value from string to integer field_info["maxLength"] = int(field_info["maxLength"]) for key in expected_keys: if key not in field_info: # create the key and set the default value to None field_info.setdefault(key) # print('******* FIELD INFO ********', field_info) field_choices = field_info['choices'] choices_string = "" for choice in field_choices: choices_string += (str(choice) + '&&') field = Field( type=field_info["type"], label=field_info["label"], max_length=field_info["maxLength"], required=field_info["required"], placeholder=field_info["placeholder"], instructions=field_info["instructions"], choices=choices_string, form=form # handles the form_id ) # db.session.add(field) form_fields.append(field) db.session.add_all(form_fields) db.session.commit() # test_form = Form.query.filter_by(title='To Test Fields').first() # print("*** FORM.FIELDS ***", type(test_form.fields)) # print("*** FIELD.FORMS ***", 
form_fields[0].form) # # ...so we can use the dict.update() method # return_form = form.to_dict() # # add an entry in 'form' contaning its related fields # return_form.update({"fields": [field.to_dict() for field in form_fields]}) # print('**** FORM WITH FIELDS ****', form.to_dict()) return form.to_dict() @form_routes.route('/<int:id>', methods=['PUT']) @login_required def edit_form(id): form = Form.query.get(id) if form: if form.owner_id == current_user.id: data = request.get_json() form.title= data["title"] form.description= data["description"] form.label_placement= data["labelPlacement"] form.description_align= data["descriptionAlignment"] form.title_align= data["titleAlignment"] # Remove any fields on the form that previously existed for field in form.fields: db.session.delete(field) db.session.commit() # Re-add all the fields to the form form_fields = [] for field_info in data["fields"]: # all of the columns in the fields table (except id) expected_keys = [ "type", "label", "maxLength", "required", "placeholder", "instructions", "choices" ] # check whether field_info["maxLength"] exists if "maxLength" in field_info: # convert the value from string to integer field_info["maxLength"] = int(field_info["maxLength"]) for key in expected_keys: if key not in field_info: # create the key and set the default value to None field_info.setdefault(key) # print('******* FIELD INFO ********', field_info) field_choices = field_info['choices'] choices_string = "" for choice in field_choices: choices_string += (str(choice) + '&&') field = Field( type=field_info["type"], label=field_info["label"], max_length=field_info["maxLength"], required=field_info["required"], placeholder=field_info["placeholder"], instructions=field_info["instructions"], choices=choices_string, form=form # handles the form_id ) # db.session.add(field) form_fields.append(field) db.session.add_all(form_fields) db.session.commit() return form.to_dict() else: return "You do not own the form you are trying to 
edit.", 401 else: return "The form you're trying to edit does not exist.", 400 # ! currently causes error "405 method not allowed" # ! when not bundled with `user_forms(id)` above # delete a specific form by primary key # @form_routes.route('/<int:id>', methods=['DELETE']) # def delete_form(id): # if request.method == 'DELETE': # form = Form.query.get(id) # db.session.delete(form) # db.session.commit() # return form.to_dict() # @form_routes.route('/<int:id>') # def get_form(id): # form = Form.query.filter(Form.id == id).first() # # fields = Field.query.filter(Field.form_id == form.id).all() # print('FORM IS HERE!!! ', form.to_dict()) # # print('FIELD IS HERE!!!!! ***', # # {'fields': [field.to_dict() for field in fields]}) # # form["fields"] = {'fields': [field.to_dict() for field in fields]} # return form.to_dict() @form_routes.route('/<int:id>/shared', methods=['GET']) @login_required def get_share_forms(id): # get a specific form by primary key if request.method == 'GET': form = Form.query.get(id) print('FORM CHOICES!!!!!!', form) return form.to_dict()
/app/forms/signup_form.py
from flask_wtf import FlaskForm from wtforms import StringField, PasswordField from wtforms.validators import Email, ValidationError, InputRequired, Length, EqualTo from app.models import User def user_exists(form, field): # Checking if user exists email = field.data user = User.query.filter(User.email == email).first() if user: raise ValidationError('Email address is already in use.') def username_exists(form, field): # Checking if username is already in use username = field.data user = User.query.filter(User.username == username).first() if user: raise ValidationError('Username is already in use.') class SignUpForm(FlaskForm): username = StringField( 'username', validators=[InputRequired(message='Input Required'), Length(max=40, message='Must be less than 40 characters'), username_exists]) email = StringField('email', validators=[InputRequired(), Length( max=40, message='Must be less than 40 characters'), Email(message='Invalid'), user_exists]) password = PasswordField('password', validators=[ InputRequired(), EqualTo('confirm', message='Passwords must match')]) confirm = PasswordField('confirm')
/app/models/__init__.py
from .db import db from .user import User from .form import Form from .field import Field
/app/models/field.py
from .db import db class Field(db.Model): __tablename__ = 'fields' id = db.Column(db.Integer, primary_key=True) type = db.Column(db.String(255), nullable=False) label = db.Column(db.String(55), nullable=False) max_length = db.Column(db.Integer) required = db.Column(db.Boolean, nullable=False) placeholder = db.Column(db.String(255)) instructions = db.Column(db.String(255)) choices = db.Column(db.Text) form_id = db.Column(db.Integer, db.ForeignKey("forms.id")) # forms = db.relationship("Form", foreign_keys=form_id, lazy="joined") # redundant def to_dict(self): return { 'id': self.id, 'form_id': self.form_id, 'type': self.type, 'label': self.label, 'max_length': self.max_length, 'required': self.required, 'placeholder': self.placeholder, 'instructions': self.instructions, # splits choices into a list, removes empty list entry at the end 'choices': self.choices[:-2].split('&&') }
/app/models/form.py
from .db import db class Form(db.Model): __tablename__ = 'forms' id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String(50), nullable=False) owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False) description = db.Column(db.Text) label_placement = db.Column(db.String(10)) description_align = db.Column(db.String(10)) title_align = db.Column(db.String(10)) # creates a pseudo-column (you won't see it) in the 'fields' table called 'form' that can be assigned a Form instance when creating a Field instance -- 'form' is not the name of this table fields = db.relationship('Field', backref='form') # field_id = db.Column(db.Integer, db.ForeignKey('fields.id')) # fields = db.relationship("Field", foreign_keys=field_id ,back_populates="forms", lazy="joined") def to_dict(self): # convert associated fields to serializable dictionaries form_fields = [field.to_dict() for field in self.fields] return { 'id': self.id, 'fields': form_fields, 'title': self.title, 'owner_id': self.owner_id, 'description': self.description, 'label_placement': self.label_placement, 'description_align': self.description_align, 'title_align': self.title_align } def __repr__(self): return str(self.to_dict())
/app/seeds/fields.py
from app.models import db, Field from app.models import Form def seed_fields(): form = Form( title='To Test Fields', owner_id=1 ) db.session.add(form) testField = Field( type="text", label="Test Field", required=False, form=form, # creates the form_id / association choices='Some Stuff&&Another choice&&Hello from hell&&' ) db.session.add(testField) db.session.commit() def undo_fields(): db.session.execute('TRUNCATE fields RESTART IDENTITY CASCADE;') db.session.commit()
/app/seeds/forms.py
from app.models import db, Form def seed_forms(): test = Form( title = "Test Form Render", owner_id = 1, description = "", label_placement = "", description_align = "", title_align = "", ) db.session.add(test) db.session.commit() def undo_forms(): db.session.execute('TRUNCATE forms RESTART IDENTITY CASCADE;') db.session.commit()
/app/seeds/users.py
from app.models import db, User # Adds a demo user, you can add other users here if you want def seed_users(): demo = User( username='Demo', email='demo@aa.io', password='password') marnie = User( username='marnie', email='marnie@aa.io', password='password') bobbie = User( username='bobbie', email='bobbie@aa.io', password='password') db.session.add(demo) db.session.add(marnie) db.session.add(bobbie) db.session.commit() # Uses a raw SQL query to TRUNCATE the users table. # SQLAlchemy doesn't have a built in function to do this # TRUNCATE Removes all the data from the table, and RESET IDENTITY # resets the auto incrementing primary key, CASCADE deletes any # dependent entities def undo_users(): db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;') db.session.commit()
/migrations/versions/20210816_135552_.py
"""empty message Revision ID: fa590b961f4f Revises: ffdc0a98111c Create Date: 2021-08-16 13:55:52.581549 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'fa590b961f4f' down_revision = 'ffdc0a98111c' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('forms', sa.Column('id', sa.Integer(), nullable=False), sa.Column('title', sa.String(length=50), nullable=True), sa.Column('owner_id', sa.Integer(), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('label_align', sa.String(length=10), nullable=True), sa.Column('description_align', sa.String(length=10), nullable=True), sa.Column('title_align', sa.String(length=10), nullable=True), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('forms') # ### end Alembic commands ###
/migrations/versions/20210820_100009_.py
"""empty message Revision ID: beeeac90e4ba Revises: d25f4d1b7ea0 Create Date: 2021-08-20 10:00:09.924819 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'beeeac90e4ba' down_revision = 'd25f4d1b7ea0' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'required', existing_type=sa.BOOLEAN(), nullable=True) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'required', existing_type=sa.BOOLEAN(), nullable=False) # ### end Alembic commands ###
/migrations/versions/20210820_100524_.py
"""empty message Revision ID: b8ec5632d693 Revises: beeeac90e4ba Create Date: 2021-08-20 10:05:24.638509 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b8ec5632d693' down_revision = 'beeeac90e4ba' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'label', existing_type=sa.VARCHAR(length=55), nullable=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'label', existing_type=sa.VARCHAR(length=55), nullable=True) # ### end Alembic commands ###
/migrations/versions/20210820_103408_.py
"""empty message Revision ID: b05fdd14ae4f Revises: 4563136888fd Create Date: 2021-08-20 10:34:08.171553 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b05fdd14ae4f' down_revision = '4563136888fd' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'label', existing_type=sa.VARCHAR(length=55), nullable=True) op.alter_column('fields', 'required', existing_type=sa.BOOLEAN(), nullable=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'required', existing_type=sa.BOOLEAN(), nullable=True) op.alter_column('fields', 'label', existing_type=sa.VARCHAR(length=55), nullable=False) # ### end Alembic commands ###
/migrations/versions/20210820_133516_.py
"""empty message Revision ID: b3e721c02f48 Revises: 9aec744a6b98 Create Date: 2021-08-20 13:35:16.871785 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b3e721c02f48' down_revision = '9aec744a6b98' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'form_id', existing_type=sa.INTEGER(), nullable=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'form_id', existing_type=sa.INTEGER(), nullable=True) # ### end Alembic commands ###
/migrations/versions/20210820_171546_.py
"""empty message Revision ID: 94f5eda37179 Revises: b3e721c02f48 Create Date: 2021-08-20 17:15:46.455809 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '94f5eda37179' down_revision = 'b3e721c02f48' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'label', existing_type=sa.VARCHAR(length=55), nullable=False) op.alter_column('forms', 'title', existing_type=sa.VARCHAR(length=50), nullable=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('forms', 'title', existing_type=sa.VARCHAR(length=50), nullable=True) op.alter_column('fields', 'label', existing_type=sa.VARCHAR(length=55), nullable=True) # ### end Alembic commands ###
/migrations/versions/20210821_113310_.py
"""empty message Revision ID: d0c387e43ca4 Revises: 94f5eda37179 Create Date: 2021-08-21 11:33:10.206199 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd0c387e43ca4' down_revision = '94f5eda37179' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('forms', sa.Column('field_id', sa.Integer(), nullable=True)) op.create_foreign_key(None, 'forms', 'fields', ['field_id'], ['id']) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(None, 'forms', type_='foreignkey') op.drop_column('forms', 'field_id') # ### end Alembic commands ###
/migrations/versions/20210821_145311_.py
"""empty message Revision ID: 2453c767d036 Revises: d0c387e43ca4 Create Date: 2021-08-21 14:53:11.208418 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '2453c767d036' down_revision = 'd0c387e43ca4' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey') op.drop_column('forms', 'field_id') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True)) op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id']) # ### end Alembic commands ###
/migrations/versions/20210821_161057_.py
"""empty message Revision ID: 4df12f583573 Revises: 2453c767d036 Create Date: 2021-08-21 16:10:57.556468 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '4df12f583573' down_revision = '2453c767d036' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'form_id', existing_type=sa.INTEGER(), nullable=True) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.alter_column('fields', 'form_id', existing_type=sa.INTEGER(), nullable=False) # ### end Alembic commands ###
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
folmez/Handsfree-KGS
refs/heads/master
{"/src/__init__.py": ["/src/picture_actions.py", "/src/screenshot_actions.py", "/src/mouse_actions.py"]}
└── ├── auto_goban_detection.py ├── make_goban_speak.py ├── play_handsfree_GO.py ├── setup.py ├── src │ ├── __init__.py │ ├── cam_actions.py │ ├── mouse_actions.py │ ├── picture_actions.py │ └── screenshot_actions.py ├── temp │ ├── plot_save_coordinates_on_click.py │ └── process_pyhsical_goban_pic.py └── tests ├── test_mouse_actions.py ├── test_picture_actions.py ├── test_play_handsfree_GO.py └── test_screenshot_actions.py
/auto_goban_detection.py
import matplotlib.pyplot as plt import imageio import numpy as np import src IMG_PATH = 'images/empty_pyshical_goban1.png' board_corners = [] def onclick(event): print(event.xdata, event.ydata) board_corners.append((event.xdata, event.ydata)) # Get RGB matrix of the picture with goban rgb = imageio.imread(IMG_PATH) fig = plt.figure() plt.imshow(rgb) plt.title("Please click on UL-UR-BL-BR corners...") fig.canvas.mpl_connect('button_press_event', onclick) plt.show() UL_outer_x, UL_outer_y = board_corners[0] UR_outer_x, UR_outer_y = board_corners[1] BL_outer_x, BL_outer_y = board_corners[2] BR_outer_x, BR_outer_y = board_corners[3] # Remove non-goban part from the RGB matrix and make it a square matrix rgb = src.rescale_pyhsical_goban_rgb(rgb, \ UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \ BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y) # Find the indices of board points in the new square RGB matrix x_idx, y_idx = src.find_board_points(rgb, plot_stuff=True) # Mark board points src.mark_board_points(rgb, x_idx, y_idx) #bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)] #src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy) #red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy) #bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th) #src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \ # 'black', 16,4)
/make_goban_speak.py
import src import time UL_x, UL_y, goban_step = src.get_goban_corners() prev_stone_set = set() print("Started scanning the board for moves every 5 seconds...") while True: # wait between screenshots time.sleep(5) # get board screenshot board_rgb_screenshot = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step) # find the stones on the board current_stone_set = src.get_goban_state(board_rgb_screenshot) # is there a new stone on the board? if current_stone_set > prev_stone_set: # find the new stone stone = current_stone_set - prev_stone_set # IN THE FUTURE, ALLOW FOR OPPONENT TO MAKE A QUICK MOVE!!! assert len(stone) == 1 # say the new moves on the board player = list(stone)[0][0] # 1-black, 2-white i, j = list(stone)[0][1], list(stone)[0][2] pos = src.int_coords_to_str(i,j) if player==1: update_msg = "Black played at " + pos elif player==2: update_msg = "White played at " + pos print(update_msg) prev_stone_set = current_stone_set else: print("No moves made!")
/play_handsfree_GO.py
from pynput.mouse import Button, Controller import cv2 import imageio import matplotlib.pyplot as plt import threading import time import queue import os import numpy as np import src frames = queue.Queue(maxsize=10) class frameGrabber(threading.Thread): def __init__(self): # Constructor threading.Thread.__init__(self) def run(self): cam = cv2.VideoCapture(0) img_counter = 0 while True: ret, frame = cam.read() if not ret: break img_name = f"images/game_log/opencv_frame_{img_counter}.png" cv2.imwrite(img_name, frame) print("{} written!".format(img_name)) frames.put(img_counter) img_counter += 1 time.sleep(30) cam.release() def verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, color, i, j): # Display a message to the user to put a stone print(f"\nPlease put a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}...") # Assert the stone with desired color is on the goban at the exact spot while True: time.sleep(5) frame_num = frames.get() img_name = f"images/game_log/opencv_frame_{frame_num}.png" rgb = imageio.imread(img_name) plt.imshow(rgb) plt.title(f"This board should have a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}.") plt.show() ans = input(f"Did you put a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}? 
[y/n]: ") if ans is 'y': rgb = src.rescale_pyhsical_goban_rgb(rgb, ob) assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \ red_scale_th, blue_scale_th, color, i, j, plot_stuff=True) remove_this_frame(img_name) frames.task_done() remove_unused_frames() break else: remove_this_frame(img_name) frames.task_done() def remove_this_frame(img_name): os.remove(img_name) print('Frame', img_name, 'removed.') def remove_unused_frames(): print('Removing unused frames...') while True: time.sleep(1) try: frame_num = frames.get(False) except queue.Empty: # Handle empty queue here break else: # Handle task here and call q.task_done() frame_num = frames.get() img_name = f"images/game_log/opencv_frame_{frame_num}.png" remove_this_frame(img_name) frames.task_done() print('Unused frames removed...') board_corners = [] def onclick(event): print(event.xdata, event.ydata) board_corners.append(event.xdata) board_corners.append(event.ydata) if __name__ == '__main__': # Initiate the frame grabber thread for goban pictures my_frame_grabber = frameGrabber() # Start running the threads! 
my_frame_grabber.start() print('Frame grabbing has started...') # MANUAL BOARD EDGE DETECTION FOR THE PYHSICAL BOARD # Show a plot frames and ask user to input boundaries while True: time.sleep(5) frame_num = frames.get() img_name = f"images/game_log/opencv_frame_{frame_num}.png" rgb = imageio.imread(img_name) fig = plt.figure() plt.imshow(rgb) plt.title("Please click on UL-UR-BL-BR corners or close plot...") fig.canvas.mpl_connect('button_press_event', onclick) plt.show() if not board_corners: # Skip if nothing is clicked remove_this_frame(img_name) frames.task_done() else: # Read goban corners ob = board_corners assert ob[2] > ob[0] and ob[6] > ob[4] and \ ob[7] > ob[4] and ob[5] > ob[1] # Remove this filename as it served its purpose and break out of loop remove_this_frame(img_name) frames.task_done() break # Remove all unused frames at the end remove_unused_frames() # Remove non-goban part from the RGB matrix and make it a square matrix rgb = src.rescale_pyhsical_goban_rgb(rgb, ob) # Find the indices of board points in the new square RGB matrix x_idx, y_idx = src.find_board_points(rgb, plot_stuff=False) # CALIBRATION OF PYHSICAL BOARD # Ask the user to put black and white stones on the board print('\nPlease put black stones on corners and a white stone at center') bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)] while True: time.sleep(5) frame_num = frames.get() img_name = f"images/game_log/opencv_frame_{frame_num}.png" rgb = imageio.imread(img_name) plt.imshow(rgb) plt.title('Did you put black on corners and white at center?') plt.show() ans = input('Did you put black stones on corners and a white stone at center? 
[y/n]: ') if ans is 'y': # Remove non-goban part from the RGB matrix and make it a square matrix rgb = src.rescale_pyhsical_goban_rgb(rgb, ob) # Calibrate red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy) # Refind stones using the above thresholds bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \ red_scale_th1, blue_scale_th1, plot_stuff=False) remove_this_frame(img_name) frames.task_done() remove_unused_frames() break else: remove_this_frame(img_name) frames.task_done() print('\nPlease put white stones on corners and a black stone at center') wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)] while True: time.sleep(5) frame_num = frames.get() img_name = f"images/game_log/opencv_frame_{frame_num}.png" rgb = imageio.imread(img_name) plt.imshow(rgb) plt.title('Did you put white on corners and black at center?') plt.show() ans = input('Did you put white stones on corners and a black stone at center? [y/n]: ') if ans is 'y': # Remove non-goban part from the RGB matrix and make it a square matrix rgb = src.rescale_pyhsical_goban_rgb(rgb, ob) # Calibrate red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy) # Refind stones using the above thresholds bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \ red_scale_th2, blue_scale_th2, plot_stuff=False) remove_this_frame(img_name) frames.task_done() remove_unused_frames() break else: remove_this_frame(img_name) frames.task_done() red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2) blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2) # VERIFY CALIBRATION OF PHYSICAL BOARD print(' [PLEASE KEEP IN MIND THAT YOUR LOWER-LEFT CORNER IS (1,1)]') verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'black', 3, 4) verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'white', 1, 1) verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'black', 10, 10) verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'white', 19, 19) print("CALIBRATION 
IS VERIFIED\n" + 50*"-") # DIGITAL BOARD DETECTION # Ask the user to open a KGS board print('\n OPEN A KGS BOARD/GAME NOW') input('ENTER when the digital board is open: ') # Get the user to click on come corners to get to know the digital board UL_x, UL_y, goban_step = src.get_goban_corners() # Test by moving to the star points on the board for str in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']: i, j = src.str_to_integer_coordinates(str) x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step) src.make_the_move(mouse, x, y, no_click=True) # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD # Plan - 1) check frames continously until a move is made by you # 2) check digital board until a move is made by your opponent # First, remove all unused frames remove_unused_frames() # Scan the frames for moves every five seconds mouse = Controller() # obtain mouse controller bxy, wxy = [], [] # empty board in the beginning while True: time.sleep(5) frame_num = frames.get() img_name = f"images/game_log/opencv_frame_{frame_num}.png" color, i, j = src.scan_next_move(img_name, ob, x_idx, y_idx, \ red_scale_th, blue_scale_th, bxy, wxy) if color is not None: # Play the move and update the stone lists bxy, wxy = src.play_next_move_on_digital_board(mouse, color, \ i, j, bxy, wxy, UL_x, UL_y, goban_step) # Start checking the digital board for new moves else: # Remove this frame and start waiting for the next frame remove_this_frame(img_name) frames.task_done() # Wait for the threads to finish... my_frame_grabber.join() print('Main Terminating...')
/setup.py
from setuptools import setup, find_packages setup( name='Handsfree-KGS', version='0.0', description='Pay Handsfree Go on KGS', author='Fatih Olmez', author_email='folmez@gmail.com', packages=find_packages())
/src/__init__.py
from .mouse_actions import get_goban_corners, str_to_integer_coordinates from .mouse_actions import int_coords_to_screen_coordinates, make_the_move from .mouse_actions import int_coords_to_str from .screenshot_actions import KGS_goban_rgb_screenshot, get_digital_goban_state from .picture_actions import plot_goban_rgb, average_RGB, make_indices_agree from .picture_actions import return_int_pnts, subtract_rolling_sum from .picture_actions import rolling_sum, find_custom_local_minima from .picture_actions import mark_board_points, is_this_stone_on_the_board from .picture_actions import mark_stones, calibrate from .picture_actions import find_board_points, rescale_pyhsical_goban_rgb from .picture_actions import get_pyhsical_board_outer_corners from .picture_actions import convert_physical_board_ij_to_str from .picture_actions import play_next_move_on_digital_board, scan_next_move
/src/cam_actions.py
import imageio def get_pyhsical_goban_state(rgb_pix): pass def picture_to_rgb(path): return misc.imageio(path)
/src/mouse_actions.py
from pynput.mouse import Button, Controller
import src
import time


def get_goban_corners():
    """Interactively learn the on-screen goban geometry.

    The user parks the cursor on the A19 (upper-left) and then the T1
    (bottom-right) intersection for five seconds each.  Returns the
    upper-left pixel coordinates and the average pixel step between two
    adjacent intersections.
    """
    # Obtain mouse controller
    mouse = Controller()
    # Ask the user to define goban corners
    print('Move cursor to upper-left (A19) corner of Goban and keep it there five seconds')
    time.sleep(5)
    (UL_x, UL_y) = mouse.position
    print(f"Upper-Left: ({UL_x},{UL_y})")
    print()
    print('Move cursor to bottom-right (T1) corner of Goban and keep it there five seconds')
    time.sleep(5)
    (BR_x, BR_y) = mouse.position
    print(f"Bottom-Right: ({BR_x},{BR_y})")
    print()
    # A 19x19 board has 18 gaps per side; average the horizontal and the
    # vertical step to smooth out small measurement errors.
    goban_step = 0.5 * (BR_x - UL_x) * 1/18 + 0.5 * (BR_y - UL_y) * 1/18
    print(f"Goban-steps is {goban_step}")
    return UL_x, UL_y, goban_step


def make_the_move(mouse, x, y, no_click=False):
    """Glide the cursor to screen position (x, y) and left-click.

    With ``no_click=True`` only the cursor is moved (used for dry runs).
    """
    (cx, cy) = mouse.position
    time.sleep(0.5)
    mouse.move(x - cx, y - cy)
    time.sleep(0.2)
    if not no_click:
        mouse.click(Button.left, 1)


def int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step):
    """Map board indices (i, j), both 1-based, to screen pixel coordinates."""
    x = UL_x + (i-1) * goban_step
    y = UL_y + (j-1) * goban_step
    return x, y


def str_to_integer_coordinates(coord):
    """Convert a KGS label such as 'D16' to (i, j) board indices.

    Upper-left corner is (1, 1) and bottom-right corner is (19, 19).
    Goban column labels skip the letter I.

    Fix: the parameter was named ``str``, shadowing the builtin; renamed to
    ``coord`` (all call sites in this project pass it positionally).
    """
    j = 19 - int(coord[1:3]) + 1
    if ord(coord[0]) < ord('I'):
        i = ord(coord[0]) - ord('A') + 1
    else:
        # Columns J..T: the skipped letter I shifts the offset by one.
        i = ord(coord[0]) - ord('A')
    return i, j


def int_coords_to_str(i, j):
    """Convert (i, j) board indices back to a KGS label such as 'D16'.

    Upper-left corner is (1, 1) and bottom-right corner is (19, 19).
    Goban boards skip the letter I.
    """
    if i <= ord('I') - ord('A'):
        return chr(ord('A') + i-1) + f"{20-j}"
    else:
        return chr(ord('A') + i) + f"{20-j}"
/src/picture_actions.py
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import argrelmin
import imageio
import src


def play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy,
                                    UL_x, UL_y, goban_step):
    """Replay a newly detected physical move on the digital board.

    Appends (i, j) to the black or white stone list and clicks the matching
    on-screen position.  Returns the (possibly updated) stone lists.
    """
    if color is not None:
        print(f"New move: {color} played at {convert_physical_board_ij_to_str(i,j)}")
        # Bug fix: the original used identity comparison (``is``) against
        # string literals, which only works by CPython interning accident;
        # compare by value instead.
        if color == 'black':
            bxy.append((i, j))
        elif color == 'white':
            wxy.append((i, j))
        # make the move
        x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)
        src.make_the_move(mouse, x, y)
    return bxy, wxy


def convert_physical_board_ij_to_str(i, j):
    """
    The pyhsical board will have the upper-left corner labeled as (1,1)
    and the bottom-right corner labeled as (19,19). This little script
    will help translate and correct and misalignment between the
    here-described labeling and the algorithm.
    """
    return f"({i},{j})"


def scan_next_move(img_name, ob, x_idx, y_idx, red_scale_th, blue_scale_th,
                   bxy, wxy, plot_stuff=False):
    """Compare the stones found in frame *img_name* against (bxy, wxy).

    Returns (color, i, j) of the single new stone, or (None, None, None)
    when the board is unchanged.  Raises ValueError when the frame differs
    by more than one stone.
    """
    rgb = imageio.imread(img_name)
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx,
                                       red_scale_th, blue_scale_th,
                                       plot_stuff=plot_stuff)
    print(bxy, wxy, bxy_new, wxy_new)
    if set(bxy_new) == set(bxy) and set(wxy_new) == set(wxy):
        color, i, j = None, None, None
        print('No new moves')
    elif len(set(bxy_new)-set(bxy)) == 1 and set(wxy_new) == set(wxy):
        color = 'black'
        [(i, j)] = list(set(bxy_new)-set(bxy))
    elif len(set(wxy_new)-set(wxy)) == 1 and set(bxy_new) == set(bxy):
        color = 'white'
        [(i, j)] = list(set(wxy_new)-set(wxy))
    else:
        raise ValueError('Move scanner error!')
    return color, i, j


# Module-level accumulator filled by onclick(); read by
# get_pyhsical_board_outer_corners() in UL-UR-BL-BR click order.
BOARD_CORNERS = []


def onclick(event):
    """Matplotlib callback: record the clicked data coordinates."""
    print(event.xdata, event.ydata)
    BOARD_CORNERS.append((event.xdata, event.ydata))


def get_pyhsical_board_outer_corners(img_name):
    """Show *img_name* and let the user click the UL-UR-BL-BR outer corners.

    Returns the eight corner coordinates in that order.
    """
    rgb = imageio.imread(img_name)
    fig = plt.figure()
    plt.imshow(rgb)
    plt.title("Please click on UL-UR-BL-BR corners or close plot...")
    fig.canvas.mpl_connect('button_press_event', onclick)
    plt.show()
    UL_outer_x, UL_outer_y = BOARD_CORNERS[0]
    UR_outer_x, UR_outer_y = BOARD_CORNERS[1]
    BL_outer_x, BL_outer_y = BOARD_CORNERS[2]
    BR_outer_x, BR_outer_y = BOARD_CORNERS[3]
    return UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
        BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y


def find_board_points(rgb, plot_stuff=False):
    """
    You have the RGB matrix of the goban as a square matrix but you don't
    know which entries correspon to the points on the board. This code
    finds the board points by plotting average red, green and blue scales
    and calculating the 19 local minima. Why? Because board points are
    intersections of black lines and RGB value of black color is [0,0,0].
    """
    if plot_stuff:
        plt.subplot(221)
        plt.imshow(rgb)
        plt.subplot(222)
        x1_idx = find_custom_local_minima(np.mean(rgb[:,:,0], axis=0), 'r', plot_stuff)
        plt.subplot(223)
        x2_idx = find_custom_local_minima(np.mean(rgb[:,:,1], axis=0), 'g', plot_stuff)
        plt.subplot(224)
        x3_idx = find_custom_local_minima(np.mean(rgb[:,:,2], axis=0), 'b', plot_stuff)
        plt.show()
        plt.subplot(221)
        plt.imshow(rgb)
        plt.subplot(222)
        y1_idx = find_custom_local_minima(np.mean(rgb[:,:,0], axis=1), 'r', plot_stuff)
        plt.subplot(223)
        y2_idx = find_custom_local_minima(np.mean(rgb[:,:,1], axis=1), 'g', plot_stuff)
        plt.subplot(224)
        y3_idx = find_custom_local_minima(np.mean(rgb[:,:,2], axis=1), 'b', plot_stuff)
        plt.show()
    else:
        x1_idx = find_custom_local_minima(np.mean(rgb[:,:,0], axis=0), 'r', plot_stuff)
        x2_idx = find_custom_local_minima(np.mean(rgb[:,:,1], axis=0), 'g', plot_stuff)
        x3_idx = find_custom_local_minima(np.mean(rgb[:,:,2], axis=0), 'b', plot_stuff)
        y1_idx = find_custom_local_minima(np.mean(rgb[:,:,0], axis=1), 'r', plot_stuff)
        y2_idx = find_custom_local_minima(np.mean(rgb[:,:,1], axis=1), 'g', plot_stuff)
        y3_idx = find_custom_local_minima(np.mean(rgb[:,:,2], axis=1), 'b', plot_stuff)
    # Sometimes indices found by red, green and blue scales don't agree
    x_idx = src.make_indices_agree(x1_idx, x2_idx, x3_idx)
    y_idx = src.make_indices_agree(y1_idx, y2_idx, y3_idx)
    return x_idx, y_idx


def rescale_pyhsical_goban_rgb(rgb, ob, *corners):
    """Crop/warp the photo to a 300x300 board-only RGB matrix.

    *ob* is the sequence of eight outer-corner coordinates
    (UL_x, UL_y, UR_x, UR_y, BL_x, BL_y, BR_x, BR_y).  For backward
    compatibility the eight values may also be spread over nine positional
    arguments — several existing call sites pass them that way and would
    otherwise raise TypeError.
    """
    if corners:
        ob = (ob,) + corners
    # Get outer boundaries from ob
    UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
        BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = ob
    # Rescale to n by n matrix
    n = 300
    # find n points on the left and on the right boundaries
    x_left_vals, y_left_vals, rgb, _ = \
        src.return_int_pnts(n, rgb, BL_outer_x, BL_outer_y, UL_outer_x, UL_outer_y)
    x_right_vals, y_right_vals, rgb, _ = \
        src.return_int_pnts(n, rgb, BR_outer_x, BR_outer_y, UR_outer_x, UR_outer_y)
    # Calculate a new RGB matrix only for the board, by removing outside the board
    new_rgb = np.zeros([n, n, 3])
    for i in range(n):
        x1, y1 = x_left_vals[i], y_left_vals[i]
        x2, y2 = x_right_vals[i], y_right_vals[i]
        _, _, rgb, v = src.return_int_pnts(n, rgb, x1, y1, x2, y2)
        for j in range(n):
            new_rgb[n-i-1, j, :] = v[j]
    return new_rgb.astype(np.uint8)


def plot_goban_rgb(rgb, bxy=None, wxy=None):
    """Display the goban RGB matrix.

    ``bxy``/``wxy`` are accepted for call-site compatibility but unused
    here; mutable list defaults replaced with None per best practice.
    """
    plt.imshow(rgb)
    plt.ylabel('1st index = 1, ..., 19')
    plt.xlabel('2nd index = 1, ..., 19')
    plt.show()


def average_RGB(rgb, xMAX, yMAX, x, y, w):
    """Average [R, G, B] over a (2w+1)-wide window around board point (x, y).

    Window edges are clamped to the image bounds for stone detection.
    """
    xL, xR = np.maximum(0, x-w), np.minimum(x+w+1, xMAX-1)
    yL, yR = np.maximum(0, y-w), np.minimum(y+w+1, yMAX-1)
    red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))
    green_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 1]))
    blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))
    return [red_scale, green_scale, blue_scale]


def make_indices_agree(x1, x2, x3):
    """Pick, among the three candidate index lists, the most evenly spaced one.

    Board points are determined from local extrema of R, G, B values, which
    sometimes disagree; choose the list whose second difference is flattest
    (i.e. the most grid-like spacing).
    """
    a1 = np.amax(abs(np.diff(np.diff(x1))))
    a2 = np.amax(abs(np.diff(np.diff(x2))))
    a3 = np.amax(abs(np.diff(np.diff(x3)))
                 )
    # Bug fix: the original used ``x = 0`` with ``assert x is not 0`` — an
    # identity test against an int literal; use None as the sentinel.
    x = None
    x = x1 if a1 <= a2 and a1 <= a3 else x
    x = x2 if a2 <= a1 and a2 <= a3 else x
    x = x3 if a3 <= a1 and a3 <= a2 else x
    assert x is not None
    return x


def calibrate(rgb, x_idx, y_idx, bxy=None, wxy=None):
    """ Depending on light, laptop angle etc. the board may have different
    RGB values at different times. So how do we distinguis black and white
    stones? RGB of black = [0,0,0], RGB of white = [255,255,255]. We will
    use red scale to distinguish black stones and blue scale to distinguish
    white stones.

    Returns (red_scale_th, blue_scale_th).

    Bug fix: the list defaults were mutable (``bxy=[]``) and the interactive
    branches append to them, so a second no-argument call silently reused
    the previous calibration input; fresh lists are now created per call.
    """
    bxy = [] if bxy is None else bxy
    wxy = [] if wxy is None else wxy
    xMAX, yMAX, _ = rgb.shape
    roll_w = int(np.round(0.01*xMAX))
    # BLACK STONE CALIBRATION
    # Input black stone indices if bxy is empty
    if not bxy:
        msg = 'Enter black stone indices (e.g. 1 14 and 0 for end): '
        while True:
            input_text = input(msg)
            if input_text == '0':
                break
            else:
                j, i = list(map(int, input_text.split()))
                bxy.append((i, j))
                RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
                print('RGB = ', RGB)
    # Find maximum red scale of black stones
    RMAX = 0
    for j, i in bxy:
        RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
        print(f"Black stone at ({i},{j}) with RGB = ", RGB)
        RMAX = np.maximum(RMAX, RGB[0])
    # Find the min red scale of the rest to distinguish
    RMIN_rest = 255
    for i, x in enumerate(x_idx, start=1):
        for j, y in enumerate(y_idx, start=1):
            if (j, i) not in bxy:
                RGB = src.average_RGB(rgb, xMAX, yMAX, x, y, roll_w)
                RMIN_rest = np.minimum(RMIN_rest, RGB[0])
    print('\nBlack stones have a maximum red scale =', RMAX)
    print('Rest of the board have a minimum red scale', RMIN_rest)
    print('Black stone red scale threshold will be average of these two.\n')
    # Red scale threshold for black stone detection
    assert RMAX < RMIN_rest
    red_scale_th = 0.5 * RMAX + 0.5 * RMIN_rest
    # WHITE STONE CALIBRATION
    # Input white stone indices if wxy is empty
    if not wxy:
        msg = 'Enter white stone indices (e.g. 1 14 and 0 for end): '
        while True:
            input_text = input(msg)
            if input_text == '0':
                break
            else:
                j, i = list(map(int, input_text.split()))
                wxy.append((i, j))
                RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
                print('RGB = ', RGB)
    # Find minimum blue scale of white stones
    BMIN = 255
    for (j, i) in wxy:
        RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
        print(f"White stone at ({i},{j}) with RGB = ", RGB)
        BMIN = np.minimum(BMIN, RGB[2])
    # Find the max blue scale of the rest to distinguis
    BMAX_rest = 0
    for i, x in enumerate(x_idx, start=1):
        for j, y in enumerate(y_idx, start=1):
            if (j, i) not in wxy:
                RGB = src.average_RGB(rgb, xMAX, yMAX, x, y, roll_w)
                BMAX_rest = np.maximum(BMAX_rest, RGB[2])
    print('\nWhite stones have a minimum blue scale >', BMIN)
    print('Rest of the board have a maximum blue scale', BMAX_rest)
    print('White stone blue scale threshold will be average of these two.\n')
    # Blue scale threshold for white stone detection
    assert BMIN > BMAX_rest
    blue_scale_th = 0.5 * BMIN + 0.5 * BMAX_rest
    return red_scale_th, blue_scale_th


def is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th,
                               color, i, j, plot_stuff=False):
    """Return True iff a stone of *color* is detected at board point (i, j).

    Uses the calibrated red threshold for black stones and blue threshold
    for white stones.
    """
    i, j = j, i  # RGB matrix is messed up so this needs to be done
    x, y = x_idx[i-1], y_idx[j-1]
    if plot_stuff:
        fig = plt.figure()
        plt.imshow(rgb)
        plt.ylabel('1st index = 1, ..., 19')
        plt.xlabel('2nd index = 1, ..., 19')
        plt.title(f"Checking if there is a {color} stone at ({j},{i})")
        plt.plot(x, y, 'ro', markersize=20, fillstyle='none')
        plt.show()
    xMAX, yMAX, _ = rgb.shape
    roll_w = int(np.round(0.01*xMAX))
    xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)
    yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)
    red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))
    blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))
    msg = f"There is {color} stone at {src.int_coords_to_str(i,j)} = ({i},{j})"
    if color == 'black' and red_scale < red_scale_th:
        print(msg)
        return True
    elif color == 'white' and blue_scale > blue_scale_th:
        print(msg)
        return True
    else:
        return False


def mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, plot_stuff=True):
    """Detect every stone on the board and return (bxy, wxy) index lists.

    Also paints a diagnostic copy of the image (yellow = black stone,
    white = white stone, red = empty point) shown when ``plot_stuff``.
    """
    xMAX, yMAX, _ = rgb.shape
    roll_w = int(np.round(0.01*xMAX))
    new_rgb = np.copy(rgb)
    bxy, wxy = [], []  # black and white stone lists including pairs
    for i, x in enumerate(x_idx, start=1):
        for j, y in enumerate(y_idx, start=1):
            xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)
            yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)
            red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))
            blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))
            if red_scale < red_scale_th or blue_scale > blue_scale_th:
                if blue_scale > blue_scale_th:
                    wxy.append((j, i))
                    new_rgb[yL:yR, xL:xR, :] = 255, 255, 255  # white stone
                elif red_scale < red_scale_th:
                    bxy.append((j, i))
                    new_rgb[yL:yR, xL:xR, :] = 255, 255, 0  # black stone
            else:
                new_rgb[yL:yR, xL:xR, :] = 255, 0, 0  # empty
    if plot_stuff:
        src.plot_goban_rgb(new_rgb)
    return bxy, wxy


def mark_board_points(rgb, x_idx, y_idx, bxy=None, wxy=None):
    """
    Mark board points with red squares. Use yellow color for black stones
    and white color for white stones that are inputted.

    Mutable list defaults replaced with None per best practice.
    """
    bxy = [] if bxy is None else bxy
    wxy = [] if wxy is None else wxy
    xMAX, yMAX, _ = rgb.shape
    roll_w = int(np.round(0.01*xMAX))
    new_rgb = np.copy(rgb)
    for i, x in enumerate(x_idx, start=1):
        for j, y in enumerate(y_idx, start=1):
            xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)
            yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)
            if (j, i) in bxy:    # black stone
                new_rgb[yL:yR, xL:xR, :] = 255, 255, 0    # yellow color
            elif (j, i) in wxy:  # white stone
                new_rgb[yL:yR, xL:xR, :] = 255, 255, 255  # white color
            else:                # empty board point
                new_rgb[yL:yR, xL:xR, :] = 255, 0, 0      # red color
    src.plot_goban_rgb(new_rgb)


def find_custom_local_minima(ar1, color, plot_stuff):
    """Return the 19 grid-line indices of *ar1* after rolling-mean detrending."""
    roll_w = int(np.round(len(ar1)/100))
    ar2 = subtract_rolling_sum(roll_w, ar1)
    idx = find_local_minima(ar2)
    if plot_stuff:
        plt.plot(ar2, color)
        for i in idx:
            plt.plot(i, ar2[i], 'k*')
    return idx


def find_local_minima(ar):
    """Find the cut-off value below which *ar* has exactly 19 local minima.

    Bisects [min(ar), 100.0] until exactly 19 of the local-minimum indices
    fall below the cut-off, then returns those indices.
    """
    # Find indices that correspond to local minima
    x = argrelmin(ar)
    idx_list = x[0]
    target = 19
    min_val, max_val = np.amin(ar), 100.0
    # Assert that above choices are good
    assert sum(ar[i] <= min_val for i in idx_list) < target
    assert sum(ar[i] <= max_val for i in idx_list) > target
    # Find the cut-off below which there are exactly 19 local minima
    while True:
        new_val = 0.5 * min_val + 0.5 * max_val
        if sum(ar[i] <= new_val for i in idx_list) < target:
            min_val = new_val
        elif sum(ar[i] <= new_val for i in idx_list) > target:
            max_val = new_val
        elif sum(ar[i] <= new_val for i in idx_list) == target:
            break
    # Find the indices
    return [i for i in idx_list if ar[i] <= new_val]


def rolling_sum(w, ar):
    """Return the centered rolling mean of *ar* with half-window *w*.

    Edge windows are truncated to the available samples.
    """
    new_ar = np.zeros(len(ar))
    for i in range(len(ar)):
        if i >= w and i <= len(ar)-w-1:
            new_ar[i] = np.mean(ar[i-w:i+w+1])
        elif i < w:
            new_ar[i] = np.mean(ar[0:i+1])
        elif i > len(ar)-w-1:
            new_ar[i] = np.mean(ar[i:len(ar)+1])
    assert len(new_ar) == len(ar)
    return new_ar


def subtract_rolling_sum(w, ar):
    """Detrend *ar* by subtracting its rolling mean."""
    return ar - rolling_sum(w, ar)


def return_int_pnts(num, rgb, x1, y1, x2, y2):
    """Sample *num* integer pixel points on the segment (x1,y1)-(x2,y2).

    Returns (x_vals, y_vals, rgb, rgb_values_along_the_segment).
    """
    x_vals = np.round(np.linspace(x1, x2, num=num, endpoint=True))
    x_vals = x_vals.astype(int)
    y_vals = np.round(np.linspace(y1, y2, num=num, endpoint=True))
    y_vals = y_vals.astype(int)
    # one of these two must not contain any duplicates
    assert len(x_vals) == len(set(x_vals)) or len(y_vals) == len(set(y_vals))
    # Return RGB values
    return_array = [rgb[y, x, 0:3] for x, y in zip(x_vals, y_vals)]
    return x_vals, y_vals, rgb, return_array
/src/screenshot_actions.py
import pyscreeze
import numpy as np
import matplotlib.pyplot as plt
import src


def get_digital_goban_state(rgb_pix, plot_stuff=False):
    """Scan a square screenshot of the digital goban and return its stones.

    Returns a set of triples (color, col, row) with color 1 = black and
    2 = white, both indices 1-based.  NOTE(review): the input array is
    marked up in place (detected stones are painted over), so the caller's
    ``rgb_pix`` is mutated even when ``plot_stuff`` is False — confirm this
    side effect is intended.
    """
    # RGB of Black = [ 0, 0, 0]
    # RGB of White = [255, 255, 255]
    # RGB of Orange = [255, 160, 16]
    # Use red scale to find out black stones, blue scale to find out white stones
    # (1, 1, 1) - Black A1 (upper corner)
    # (2, 19, 19) - White T10 (lower corner)
    idx = np.arange(19)+1
    m, n, z = rgb_pix.shape
    assert m == n  # the screenshot must be square
    # Approximate diameter of a stone in terms of pixels
    stone_diam = n/19
    # Calculate pixels where stone centers will be positioned
    stone_centers = np.round(stone_diam*idx) - 0.5 * np.round(stone_diam) - 1
    stone_centers = stone_centers.astype(int)
    # For every stone center, we will check a square matrix centered around
    # the stone center and find the average color. If it is black, then the
    # stone is black, if it is white, then the stone is white, otherwise no stone
    square_length_in_a_stone = int(np.round((n/19) / np.sqrt(2)))
    if square_length_in_a_stone % 2 == 0:
        d = square_length_in_a_stone / 2
    else:
        d = (square_length_in_a_stone-1) / 2
    d = int(d-1)  # just in case, make square smaller and integer
    # Calculate the mean of a small matrix around every board point to find out
    # if there is a black stone or white stone or nothing
    stones = set()
    for posi, i in enumerate(stone_centers, start=1):
        for posj, j in enumerate(stone_centers, start=1):
            # Find black stones: low mean red channel inside the sample square
            mat = rgb_pix[:,:,0]
            color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))
            if color < 125:
                stones.add((1, posj, posi))  # black stone
                rgb_pix[i-d+1:i+d, j-d+1:j+d, :] = 0
            # Find white stones: high mean blue channel inside the sample square
            mat = rgb_pix[:,:,2]
            color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))
            if color > 125:
                stones.add((2, posj, posi))  # white stone
                rgb_pix[i-d+1:i+d, j-d+1:j+d] = 255
    # Plot for debugging
    if plot_stuff:
        plt.imshow(rgb_pix)
        plt.show()
    return stones


def KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step):
    """Grab a screenshot of the KGS goban region and return it as RGB.

    The region extends half a goban step beyond the outermost grid lines on
    each side so whole stones are captured; the alpha channel (if any) is
    dropped.
    """
    UL_outer_x = UL_x - 0.5*goban_step
    UL_outer_y = UL_y - 0.5*goban_step
    BR_outer_x = UL_x + 18*goban_step + 0.5*goban_step
    BR_outer_y = UL_y + 18*goban_step + 0.5*goban_step
    im = pyscreeze.screenshot(region=(UL_outer_x, UL_outer_y, \
        BR_outer_x-UL_outer_x, BR_outer_y-UL_outer_y))
    pix = np.array(im)
    rgb_pix = pix[...,:3]  # keep only the R, G, B channels
    return rgb_pix
/temp/plot_save_coordinates_on_click.py
import matplotlib.pyplot as plt

# Collected (x, y) data coordinates, one tuple per mouse click.
xy = []


def onclick(event):
    """Echo and record the data coordinates of a mouse click."""
    point = (event.xdata, event.ydata)
    print(event.xdata, event.ydata)
    xy.append(point)


# Draw a throwaway line plot, wire up the click handler, and collect
# clicks until the window is closed; then dump everything gathered.
fig = plt.figure()
plt.plot(range(10))
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
print(xy)
/temp/process_pyhsical_goban_pic.py
import matplotlib.pyplot as plt
import imageio
import numpy as np
import src

# Pick the sample picture to process (uncomment one of the alternatives).
IMG_PATH = 'images/pyshical_goban_pic1.png'
#IMG_PATH = 'images/pyshical_goban_pic2.png'
#IMG_PATH = 'images/pyshical_goban_pic3.png'
UL_outer_x, UL_outer_y = 315, 24
UR_outer_x, UR_outer_y = 999, 40
BL_outer_x, BL_outer_y = 3, 585
BR_outer_x, BR_outer_y = 1273, 621
#IMG_PATH = 'images/pyshical_goban_pic4.png'
#UL_outer_x, UL_outer_y = 321, 235
#UR_outer_x, UR_outer_y = 793, 244
#BL_outer_x, BL_outer_y = 92, 603
#BR_outer_x, BR_outer_y = 933, 608

# Get RGB matrix of the picture with goban
rgb = imageio.imread(IMG_PATH)
plt.imshow(rgb)
plt.show()

# Remove non-goban part from the RGB matrix and make it a square matrix.
# Bug fix: rescale_pyhsical_goban_rgb takes the outer boundaries packed in
# one sequence (rgb, ob); the original spread nine positional arguments,
# which raises TypeError against the function's signature.
ob = [UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y,
      BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y]
rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)

# Find the indices of board points in the new square RGB matrix
x_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)

# Known stone positions for calibration on pic1
bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]
src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)
red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)
src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \
    'black', 16, 4)
/tests/test_mouse_actions.py
from pynput.mouse import Button, Controller
import time
import sys
import os
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src


def test_str_to_integer_coordinates():
    """KGS labels map to 1-based (i, j) with (1, 1) at the upper-left."""
    assert src.str_to_integer_coordinates('A19') == (1, 1)
    assert src.str_to_integer_coordinates('D16') == (4, 4)
    assert src.str_to_integer_coordinates('D10') == (4, 10)
    assert src.str_to_integer_coordinates('T1') == (19, 19)
    assert src.str_to_integer_coordinates('K10') == (10, 10)


def test_integer_coordinates_to_str():
    """Inverse of str_to_integer_coordinates (column letters skip I)."""
    assert src.int_coords_to_str(1, 1) == 'A19'
    assert src.int_coords_to_str(4, 4) == 'D16'
    assert src.int_coords_to_str(4, 10) == 'D10'
    assert src.int_coords_to_str(19, 19) == 'T1'
    assert src.int_coords_to_str(10, 10) == 'K10'


@pytest.mark.slow
def test_place_stones_on_all_stars():
    """Interactive test: clicks a stone onto each of the nine star points."""
    print()
    # Get goban corners
    UL_x, UL_y, goban_step = src.get_goban_corners()
    # Obtain mouse controller
    mouse = Controller()
    # Place stones on stars
    print('\n', 41*'-')
    print(5*'-', 'Placing stones on all stars', 5*'-')
    print(41*'-', '\n')
    # Fix: loop variable was named ``str``, shadowing the builtin.
    for star in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']:
        i, j = src.str_to_integer_coordinates(star)
        x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)
        src.make_the_move(mouse, x, y)
    # Get KGS goban as a square grayscale
    rgb_pix = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)
/tests/test_picture_actions.py
import pytest
import imageio
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src

# stones - upper-left corner is (1,1), lower-left corner is (19,1)
IMG_PATH = ['images/pyshical_goban_pic1.png', 'images/pyshical_goban_pic2.png',
            'images/pyshical_goban_pic3.png', 'images/pyshical_goban_pic4.png',
            'images/pyshical_goban_pic5.png']
bxy0, wxy0 = [(4,4), (16,4)], [(4,16),(16,16)]
bxy1, wxy1 = [(1,9), (16,8)], [(10,1),(13,19)]
bxy2, wxy2 = [(1,19), (17,3)], [(1,3),(19,19)]
bxy3, wxy3 = [(1,19), (19,1), (5,4), (6,16), (12,8), (14,6), (16,10), (19,13)], \
             [(1,1), (4,10), (7,7), (10,4), (10,10), (12,11), (15,7), (19,19)]
bxy4, wxy4 = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]

# Outer board corners (pixels) for each sample picture group
UL_outer_x0, UL_outer_y0 = 315, 24
UR_outer_x0, UR_outer_y0 = 999, 40
BL_outer_x0, BL_outer_y0 = 3, 585
BR_outer_x0, BR_outer_y0 = 1273, 621
UL_outer_x3, UL_outer_y3 = 321, 235
UR_outer_x3, UR_outer_y3 = 793, 244
BL_outer_x3, BL_outer_y3 = 92, 603
BR_outer_x3, BR_outer_y3 = 933, 608
UL_outer_x4, UL_outer_y4 = 414, 256
UR_outer_x4, UR_outer_y4 = 962, 269
BL_outer_x4, BL_outer_y4 = 217, 659
BR_outer_x4, BR_outer_y4 = 1211, 679


@pytest.mark.skip
def test_board_outer_corner():
    """Interactive: user-clicked corner must be within 5 px of the known one."""
    UL_outer_x0_click, UL_outer_y0_click, _, _, _, _, _, _ = \
        src.get_pyhsical_board_outer_corners(IMG_PATH[0])
    assert abs(UL_outer_x0_click - UL_outer_x0) < 5  # five pixels
    assert abs(UL_outer_y0_click - UL_outer_y0) < 5


def test_board_state_detection_from_camera_picture():
    """Stone detection must reproduce the known stone layout of each picture."""
    assert_board_state(IMG_PATH[4], bxy4, wxy4, 'black', bxy4[0], \
        UL_outer_x4, UL_outer_y4, UR_outer_x4, UR_outer_y4, \
        BL_outer_x4, BL_outer_y4, BR_outer_x4, BR_outer_y4, \
        plot_stuff=False)
    assert_board_state(IMG_PATH[0], bxy0, wxy0, 'black', bxy0[1], \
        UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
        BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)
    assert_board_state(IMG_PATH[1], bxy1, wxy1, 'white', wxy1[0], \
        UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
        BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0, \
        plot_stuff=True)
    assert_board_state(IMG_PATH[2], bxy2, wxy2, 'black', bxy2[0], \
        UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
        BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)
    assert_board_state(IMG_PATH[3], bxy3, wxy3, 'white', wxy3[6], \
        UL_outer_x3, UL_outer_y3, UR_outer_x3, UR_outer_y3, \
        BL_outer_x3, BL_outer_y3, BR_outer_x3, BR_outer_y3)


def assert_board_state(IMG_PATH, bxy, wxy, color, ij_pair, \
                       UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
                       BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y, \
                       plot_stuff=False):
    """Calibrate on *IMG_PATH*, re-detect its stones, and assert they match."""
    # Get RGB matrix of the picture with goban
    rgb = imageio.imread(IMG_PATH)
    # Remove non-goban part from the RGB matrix and make it a square matrix.
    # Bug fix: rescale_pyhsical_goban_rgb expects the outer boundaries
    # packed into a single sequence; the original spread nine positional
    # arguments, which raises TypeError against the function's signature.
    ob = [UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y,
          BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y]
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    # Find the indices of board points in the new square RGB matrix
    x_idx, y_idx = src.find_board_points(rgb, plot_stuff=plot_stuff)
    # Find color thresholds for stone detection
    red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
    # Refind stones using the above thresholds
    bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \
        red_scale_th, blue_scale_th, plot_stuff=plot_stuff)
    assert set(bxy) == set(bxy_new)
    assert set(wxy) == set(wxy_new)
    assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \
        red_scale_th, blue_scale_th, color, ij_pair[0], ij_pair[1], \
        plot_stuff=True)
/tests/test_play_handsfree_GO.py
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pynput.mouse import Button, Controller
import pytest
import imageio
import src

# Write a test of play_handsfree_GO.py using already existing frames
img_name = []
folder_name = 'images/sample_game_log/ex1/'

# empty board for outer board boundary detection
img_name.append(folder_name + 'opencv_frame_1.png')
# Pre-measured outer corners of the physical board in that frame (pixels),
# so the interactive corner-clicking step can be skipped.
UL_outer_x, UL_outer_y = 376.27419354838713, 91.34516129032261
UR_outer_x, UR_outer_y = 962.08064516129020, 101.66774193548395
BL_outer_x, BL_outer_y = 120.79032258064518, 641.0225806451613
BR_outer_x, BR_outer_y = 1265.3064516129032, 652.6354838709677

# black stones on corners and a white stone at center
img_name.append(folder_name + 'opencv_frame_3.png')
# white stones on corners and a black stone at center
img_name.append(folder_name + 'opencv_frame_4.png')

# verifying calibration
img_name.append(folder_name + 'opencv_frame_b_1_1.png')    # black at (1,1)
img_name.append(folder_name + 'opencv_frame_b_1_19.png')   # black at (1,19)
img_name.append(folder_name + 'opencv_frame_b_19_19.png')  # black at (19,19)
img_name.append(folder_name + 'opencv_frame_b_19_1.png')   # black at (19,1)
img_name.append(folder_name + 'opencv_frame_b_10_10.png')  # black at (10,10)
img_name.append(folder_name + 'opencv_frame_b_4_4.png')    # black at (4,4)
img_name.append(folder_name + 'opencv_frame_b_4_10.png')   # black at (4,10)
img_name.append(folder_name + 'opencv_frame_b_4_16.png')   # black at (4,16)
img_name.append(folder_name + 'opencv_frame_b_16_16.png')  # black at (16,16)
img_name.append(folder_name + 'opencv_frame_w_1_1.png')    # white at (1,1)
img_name.append(folder_name + 'opencv_frame_w_10_10.png')  # white at (10,10)
img_name.append(folder_name + 'opencv_frame_w_16_16.png')  # white at (16,16)
img_name.append(folder_name + 'opencv_frame_w_19_19.png')  # white at (19,19)

# Frames present in the sample log but not used by this test:
#opencv_frame_b_10_4.png
#opencv_frame_b_10_16.png
#opencv_frame_b_16_4.png
#opencv_frame_b_16_10.png
#opencv_frame_b_19_1.png
#opencv_frame_w_1_19.png
#opencv_frame_w_4_4.png
#opencv_frame_w_4_10.png
#opencv_frame_w_4_16.png
#opencv_frame_w_10_16.png
#opencv_frame_w_16_4.png
#opencv_frame_w_16_10.png
#opencv_frame_w_19_1.png


def test_play_handsfree_GO():
    """End-to-end dry run of the handsfree pipeline on recorded frames.

    NOTE(review): partially interactive — it calls ``input`` and
    ``src.get_goban_corners`` and moves the real mouse, so it needs a
    human and an open KGS board; consider a skip marker for CI.
    """
    ps = False  # global plot_stuff toggle for the whole test
    # STEP 0 - EMPTY GOBAN
    # Get outer boundaries of pyhsical goban -- skipped for speed
    ob = [UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
          BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y]
    # Remove non-goban part from the RGB matrix and make it a square matrix
    # Find the indices of board points in the new square RGB matrix
    #UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
    #    BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = \
    #    src.get_pyhsical_board_outer_corners(img_name[0])
    rgb = imageio.imread(img_name[0])
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    x_idx, y_idx = src.find_board_points(rgb, plot_stuff=ps)
    # STEP 1 - GOBAN WITH BLACK STONES ON CORNERS AND A WHITE STONE AT CENTER
    rgb = imageio.imread(img_name[1])
    bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
    _, _ = src.mark_stones(rgb, x_idx, y_idx, \
        red_scale_th1, blue_scale_th1, plot_stuff=ps)
    # STEP 2 - GOBAN WITH WHITE STONES ON CORNERS AND A BLACK STONE AT CENTER
    rgb = imageio.imread(img_name[2])
    wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
    _, _ = src.mark_stones(rgb, x_idx, y_idx, \
        red_scale_th2, blue_scale_th2, plot_stuff=ps)
    # Average the two calibrations for a more robust pair of thresholds
    red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2)
    blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2)
    # STEP 3 - VERIFY CALIBRATION
    verify_calibration_for_test_purposes(img_name[3], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 1, 1, ps)
    verify_calibration_for_test_purposes(img_name[4], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 1, 19, ps)
    verify_calibration_for_test_purposes(img_name[5], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 19, 19, ps)
    verify_calibration_for_test_purposes(img_name[6], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 19, 1, ps)
    verify_calibration_for_test_purposes(img_name[7], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 10, 10, ps)
    verify_calibration_for_test_purposes(img_name[8], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 4, 4, ps)
    verify_calibration_for_test_purposes(img_name[9], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 4, 10, ps)
    verify_calibration_for_test_purposes(img_name[10], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 4, 16, ps)
    verify_calibration_for_test_purposes(img_name[11], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 16, 16, ps)
    verify_calibration_for_test_purposes(img_name[12], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 1, 1, ps)
    verify_calibration_for_test_purposes(img_name[13], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 10, 10, ps)
    verify_calibration_for_test_purposes(img_name[14], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 16, 16, ps)
    verify_calibration_for_test_purposes(img_name[15], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 19, 19, ps)
    # DIGITAL BOARD DETECTION
    # Ask the user to open a KGS board
    print('\n OPEN A KGS BOARD/GAME NOW')
    input('ENTER when the digital board is open: ')
    # Get the user to click on come corners to get to know the digital board
    UL_x, UL_y, goban_step = src.get_goban_corners()
    # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD
    mouse = Controller()  # obtain mouse controller
    print("Placing a black stone at (10,10)")
    bxy, wxy = [], []  # empty board in the beginning
    color, i, j = src.scan_next_move(img_name[7], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, bxy, wxy, plot_stuff=ps)
    _, _ = src.play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy, \
        UL_x, UL_y, goban_step)


def verify_calibration_for_test_purposes(img, ob, x, y, r, b, c, i, j, ps):
    """Assert that frame *img* really shows a *c*-colored stone at (i, j)."""
    rgb = imageio.imread(img)
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    print(f"Verifying a {c} stone at {src.convert_physical_board_ij_to_str(i,j)}...")
    assert src.is_this_stone_on_the_board(rgb, x, y, r, b, c, i, j, ps)
/tests/test_screenshot_actions.py
import imageio
import pytest
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src


def test_get_digital_goban_state():
    """The reference screenshot holds black stones at A1 and (1,14) plus a
    white stone at the lower-right corner."""
    rgb_pix = imageio.imread('images/digital_goban.png')
    expected = {(1, 1, 1), (1, 1, 14), (2, 19, 19)}
    # Process KGS goban screenshot and find the stones
    assert src.get_digital_goban_state(rgb_pix) == expected
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
jessehylton/Podrum
refs/heads/master
{"/src/podrum/nbt/NBT.py": ["/src/podrum/nbt/tag/NamedTag.py"], "/src/podrum/network/PacketPool.py": ["/src/podrum/network/protocol/ResourcePacksInfoPacket.py", "/src/podrum/network/protocol/ServerToClientHandshakePacket.py", "/src/podrum/network/protocol/ClientToServerHandshakePacket.py", "/src/podrum/network/protocol/DisconnectPacket.py"], "/src/podrum/Server.py": ["/src/podrum/utils/Utils.py", "/src/podrum/utils/Logger.py"], "/src/podrum/utils/UUID.py": ["/src/podrum/utils/Utils.py", "/src/podrum/utils/Binary.py"], "/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"], "/src/podrum/Player.py": ["/src/podrum/network/PacketPool.py"], "/src/podrum/Podrum.py": ["/src/podrum/Server.py"], "/src/podrum/utils/Config.py": ["/src/podrum/Server.py"]}
└── └── src └── podrum ├── Player.py ├── Podrum.py ├── Server.py ├── command │ └── Command.py ├── math │ └── Facing.py ├── nbt │ ├── NBT.py │ └── tag │ └── NamedTag.py ├── network │ ├── PacketPool.py │ └── protocol │ ├── ClientToServerHandshakePacket.py │ ├── DisconnectPacket.py │ ├── ResourcePacksInfoPacket.py │ └── ServerToClientHandshakePacket.py ├── resourcepacks │ └── ResourcePack.py ├── utils │ ├── Binary.py │ ├── BinaryStream.py │ ├── Config.py │ ├── Logger.py │ ├── UUID.py │ ├── Utils.py │ └── bcmath.py └── wizard └── Parser.py
/src/podrum/Player.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.network.PacketPool import PacketPool class Player: connection = None server = None logger = None address = Nome name = None locale = None randomId = None uuid = None xuid = None skin = None viewDistance = None gamemode = 0 pitch = 0 yaw = 0 headYaw = 0 onGround = False platformChatId = '' deviceOS = None deviceModel = None deviceId = Nome def __init__(self, connection, address, logger, server): self.connection = connection self.address = address self.logger = logger self.server = server
/src/podrum/Podrum.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ #!/usr/bin/env python3 import sys import inspect from os import getcwd, path from threading import Thread sys.path.insert(0, path.dirname(path.dirname(path.abspath(inspect.getfile(inspect.currentframe()))))) from podrum.Server import Server if __name__ == "__main__": if len(sys.argv) >= 3: if sys.argv[1] == "--no_wizard" and sys.argv[2] == "-travis": serverThread = Thread(target=Server, args=(getcwd(), False, True)) else: print("[!] None valid args selected.") serverThread = Thread(target=Server, args=(getcwd(), True)) elif len(sys.argv) == 2: if sys.argv[1] == "--no_wizard": serverThread = Thread(target=Server, args=(getcwd(), False)) else: print("[!] None valid args selected.") serverThread = Thread(target=Server, args=(getcwd(), True)) else: serverThread = Thread(target=Server, args=(getcwd(), True)) serverThread.start()
/src/podrum/Server.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ import time import os from podrum.lang.Base import Base from podrum.utils.Logger import Logger from podrum.utils.ServerFS import ServerFS from podrum.utils.Utils import Utils from podrum.wizard.Wizard import Wizard from pyraklib.server.PyRakLibServer import PyRakLibServer from pyraklib.server.ServerHandler import ServerHandler class Server: path = None withWizard = None port = 19132 podrumLogo = """ ____ _ | _ \ ___ __| |_ __ _ _ _ __ ___ | |_) / _ \ / _` | '__| | | | '_ ` _ \ | __/ (_) | (_| | | | |_| | | | | | | |_| \___/ \__,_|_| \__,_|_| |_| |_| """ def __init__(self, path, withWizard, isTravisBuild = False): super().__init__() startTime = Utils.microtime(True) self.path = path self.withWizard = withWizard if(withWizard): ServerFS.checkAllFiles(path) else: Wizard.skipWizard(path, True) port = self.port print(str(self.podrumLogo)) Wizard.isInWizard = False Logger.log('info', str(Base.get("startingServer")).replace("{ip}", str(Utils.getPrivateIpAddress())).replace("{port}", str(port))) Logger.log('info', str(Base.get("extIpMsg")).replace("{ipPublic}", str(Utils.getPublicIpAddress()))) Logger.log('info', str(Base.get("license"))) server = PyRakLibServer(port=19132) handler = ServerHandler(server, None) handler.sendOption("name", "MCPE;Podrum powered server;407;1.16.0;0;0;0;PodrumPoweredServer;0") doneTime = Utils.microtime(True) finishStartupSeconds = "%.3f" % (doneTime - startTime) Logger.log('info', f'Done in {str(finishStartupSeconds)}s. 
Type "help" to view all available commands.') if (isTravisBuild): Server.checkTravisBuild(path) else: while Wizard.isInWizard == False: cmd = input('> ') Server.command(cmd, True) cmd = None ticking = True while ticking: time.sleep(0.002) def command(string, fromConsole): if string.lower() == 'stop': Logger.log('info', 'Stopping server...') Utils.killServer() elif string.lower() == '': return elif string.lower() == 'help': Logger.log('info', '/stop: Stops the server') else: Logger.log('error', str(Base.get("invalidCommand"))) def checkTravisBuild(path): if not ServerFS.checkForFile(path, "server.json"): Logger.log("error", "Couldn't find server.json file.") os._exit(1) if os.path.getsize(f'{path}/server.json') == 0: Logger.log("error", "The server.json file is empty.") os._exit(1) print("Build success.") os._exit(0)
/src/podrum/command/Command.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ class Command: def onCommand(string, fromConsole): pass
/src/podrum/math/Facing.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ class Facing: AXIS_Y = 0 AXIS_Z = 1 AXIS_X = 2 FLAG_AXIS_POSITIVE = 1 DOWN = AXIS_Y << 1 UP = (AXIS_Y << 1) | FLAG_AXIS_POSITIVE NORTH = AXIS_Z << 1 SOUTH = (AXIS_Z << 1) | FLAG_AXIS_POSITIVE WEST = AXIS_X << 1 EAST = (AXIS_X << 1) | FLAG_AXIS_POSITIVE ALL = [ DOWN, UP, NORTH, SOUTH, WEST, EAST ] HORIZONTAL = [ NORTH, SOUTH, WEST, EAST ] CLOCKWISE = { AXIS_Y: { NORTH: EAST, EAST: SOUTH, SOUTH: WEST, WEST: NORTH }, AXIS_Z: { UP: EAST, EAST: DOWN, DOWN: WEST, WEST: UP }, AXIS_X: { UP: NORTH, NORTH: DOWN, DOWN: SOUTH, SOUTH: UP } } @staticmethod def axis(direction): return direction >> 1 @staticmethod def is_positive(direction): return (direction & Facing.FLAG_AXIS_POSITIVE) == Facing.FLAG_AXIS_POSITIVE @staticmethod def opposite(direction): return direction ^ Facing.FLAG_AXIS_POSITIVE @staticmethod def rotate(direction, axis, clockwise): if not Facing.CLOCKWISE[axis]: raise ValueError("Invalid axis {}".format(axis)) if not Facing.CLOCKWISE[axis][direction]: raise ValueError("Cannot rotate direction {} around axis {}".format(direction, axis)) rotated = Facing.CLOCKWISE[axis][direction] return rotated if clockwise else Facing.opposite(rotated) @staticmethod def validate(facing): if facing in Facing.ALL: raise ValueError("Invalid direction {}".format(facing))
/src/podrum/nbt/NBT.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from abc import ABCMeta, abstractmethod from podrum.nbt.tag.ByteArrayTag import ByteArrayTag from podrum.nbt.tag.ByteTag import ByteTag from podrum.nbt.tag.CompoundTag import CompoundTag from podrum.nbt.tag.DoubleTag import DoubleTag from podrum.nbt.tag.FloatTag import FloatTag from podrum.nbt.tag.IntArrayTag import IntArrayTag from podrum.nbt.tag.IntTag import IntTag from podrum.nbt.tag.ListTag import ListTag from podrum.nbt.tag.LongArrayTag import LongArrayTag from podrum.nbt.tag.LongTag import LongTag from podrum.nbt.tag.NamedTag import NamedTag from podrum.nbt.tag.ShortTag import ShortTag from podrum.nbt.tag.StringTag import StringTag class NBT: __metaclass__ = ABCMeta TAG_End = 0 TAG_Byte = 1 TAG_Short = 2 TAG_Int = 3 TAG_Long = 4 TAG_Float = 5 TAG_Double = 6 TAG_ByteArray = 7 TAG_String = 8 TAG_List = 9 TAG_COMPOUND = 10 TAG_IntArray = 11 TAG_LongArray = 12 @staticmethod def createTag(type: int) -> NamedTag: if type == NBT.TAG_Byte: return ByteTag() elif type == NBT.TAG_Short: return ShortTag() elif type == NBT.TAG_Int: return IntTag() elif type == NBT.TAG_Long: return LongTag() elif type == NBT.TAG_Float: return FloatTag() elif type == NBT.TAG_Double: return DoubleTag() elif type == NBT.TAG_ByteArray: return ByteArrayTag() elif type == NBT.TAG_String: return StringTag() elif type == NBT.TAG_List: return ListTag() elif type == NBT.TAG_Compound: return CompoundTag() elif type == NBT.TAG_IntArray: return IntArrayTag() elif type == NBT.TAG_LongArray: return LongArrayTag() else: raise ValueError("Unknown NBT tag type " + str(type))
/src/podrum/nbt/tag/NamedTag.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from abc import ABCMeta, abstractmethod from podrum.nbt.NBTStream import NBTStream from podrum.nbt.ReaderTracker import ReaderTracker class NamedTag: __metaclass__ = ABCMeta name = None cloning = False def __init__(self, name = ''): if len(name > 32767): raise ValueError("Tag name cannot be more than 32767 bytes, got length " + str(len(name))) self.name = name def getName(): return NamedTag.name def setName(name): NamedTag.name = name def getValue(): pass def getType(): pass def write(nbt: NBTStream): pass def read(nbt: NBTStream, tracker: ReaderTracker): pass def toString(indentation = 0): return (" " * indentation) + type(object) + ": " + (("name='NamedTag.name', ") if (NamedTag.name != "") else "") + "value='" + str(NamedTag.getValue()) + "'" def safeClone() -> NamedTag: if NamedTag.cloning: raise ValueError("Recursive NBT tag dependency detected") NamedTag.cloning = True retval = NamedTag.copy() NamedTag.cloning = False retval.cloning = False return retval def equals(that: NamedTag): return NamedTag.name == that.name and NamedTag.equalsValue(that) def equalsValue(that: NamedTag): return isinstance(that, NamedTag()) and NamedTag.getValue() == that.getValue()
/src/podrum/network/PacketPool.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.network.protocol.ClientToServerHandshakePacket import ClientToServerHandshakePacket from podrum.network.protocol.DataPacket import DataPacket from podrum.network.protocol.DisconnectPacket import DisconnectPacket from podrum.network.protocol.LoginPacket import LoginPacket from podrum.network.protocol.PlayStatusPacket import PlayStatusPacket from podrum.network.protocol.ResourcePacksInfoPacket import ResourcePacksInfoPacket from podrum.network.protocol.ServerToClientHandshakePacket import ServerToClientHandshakePacket class PacketPool: packetPool = {} def __init__(self): self.registerPackets() def registerPacket(packet): self.pool[packet.NID] = packet.copy() def registerPackets(self): self.registerPacket(ClientToServerHandshakePacket) self.registerPacket(DisconnectPacket) self.registerPacket(LoginPacket) self.registerPacket(PlayStatusPacket) self.registerPacket(ResourcePacksInfoPacket) self.registerPacket(ServerToClientHandshakePacket)
/src/podrum/network/protocol/ClientToServerHandshakePacket.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.network.protocol.DataPacket import DataPacket from podrum.network.protocol.ProtocolInfo import ProtocolInfo class ClientToServerHandshakePacket(DataPacket): NID = ProtocolInfo.CLIENT_TO_SERVER_HANDSHAKE_PACKET def canBeSentBeforeLogin(): return True def encodePayload(): pass def decodePayload(): pass
/src/podrum/network/protocol/DisconnectPacket.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.network.protocol.DataPacket import DataPacket from podrum.network.protocol.ProtocolInfo import ProtocolInfo class DisconnectPacket(DataPacket): NID = ProtocolInfo.DISCONNECT_PACKET hideDisconnectionScreen = False message = "" def canBeSentBeforeLogin(): return True def decodePayload(self): self.hideDisconnectionScreen = self.getBool() if not self.hideDisconnectionScreen: self.message = self.getString() def encodePayload(self): self.putBool(self.hideDisconnectionScreen) if not self.hideDisconnectionScreen: self.putString(self.message)
/src/podrum/network/protocol/ResourcePacksInfoPacket.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.network.protocol.DataPacket import DataPacket from podrum.network.protocol.ProtocolInfo import ProtocolInfo class ResourcePacksInfoPacket(DataPacket): NID = ProtocolInfo.RESOURCE_PACKS_INFO_PACKET mustAccept = False hasScripts = False behaviorPackEntries = [] resourcePackEntries = [] def decodePayload(self): self.mustAccept = self.getBool() self.hasScripts = self.getBool() behaviorPackCount = self.getLShort() while behaviorPackCount > 0: self.getString() self.getString() self.getLLong() self.getString() self.getString() self.getString() self.getBool() behaviorPackCount -= 1 resourcePackCount = self.getLShort() while resourcePackCount > 0: self.getString() self.getString() self.getLLong() self.getString() self.getString() self.getString() self.getBool() resourcePackCount -= 1 def encodePayload(self): self.putBool(self.mustAccept) self.putBool(self.hasScripts) self.putLShort(len(self.behaviorPackEntries)) for entry in self.behaviorPackEntries: self.putString(entry.getPackId()) self.putString(entry.getPackVersion()) self.putLLong(entry.getPackSize()) self.putString("") # TODO: encryption key self.putString("") # TODO: subpack name self.putString("") # TODO: content identity self.putBool(False) # TODO: has scripts (?) 
self.putLShort(len(self.resourcePackEntries)) for entry in self.resourcePackEntries: self.putString(entry.getPackId()) self.putString(entry.getPackVersion()) self.putLLong(entry.getPackSize()) self.putString("") # TODO: encryption key self.putString("") # TODO: subpack name self.putString("") # TODO: content identity self.putBool(False) # TODO: seems useless for resource packs
/src/podrum/network/protocol/ServerToClientHandshakePacket.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.network.protocol.DataPacket import DataPacket from podrum.network.protocol.ProtocolInfo import ProtocolInfo class ServerToClientHandshakePacket(DataPacket): NID = ProtocolInfo.SERVER_TO_CLIENT_HANDSHAKE_PACKET jwt = None def canBeSentBeforeLogin(): return True def decodePayload(self): self.jwt = self.getString() def encodePayload(self): self.putString(self.jwt)
/src/podrum/resourcepacks/ResourcePack.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ class ResourcePack: def getPath(): pass def getPackName(): pass def getPackId(): pass def getPackSize(): pass def getPackVersion(): pass def getSha256(): pass def getPackChunk(start, length): pass
/src/podrum/utils/Binary.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from struct import unpack, pack, calcsize from re import match import decimal import sys from .bcmath import bcmath class Binary: def checkLength(string, expect): length = len(string) assert (length == expect), 'Expected ' + str(expect) + 'bytes, got ' + str(length) @staticmethod def signByte(value: int): if calcsize == 8: return (int(value) & 0xffffffff) >> 56 else: return (int(value) & 0xffffffff) >> 24 @staticmethod def unsignByte(value: int): return int(value) & 0xff @staticmethod def signShort(value: int): if calcsize == 8: return (int(value) & 0xffffffff) >> 48 else: return (int(value) & 0xffffffff) >> 16 @staticmethod def unsignShort(value: int): return int(value) & 0xffff @staticmethod def signInt(value: int): if calcsize == 8: return (int(value) & 0xffffffff) >> 32 else: return (int(value) & 0xffffffff) >> 31 @staticmethod def unsignInt(value: int): return int(value) & 0xffffffff @staticmethod def readTriad(str: bytes) -> int: Binary.checkLength(str, 3) return unpack('>L', b'\x00' + str)[0] @staticmethod def writeTriad(value: int) -> bytes: return pack('>L', value)[1:] @staticmethod def readLTriad(str: bytes) -> int: Binary.checkLength(str, 3) return unpack('<L', b'\x00' + str)[0] @staticmethod def writeLTriad(value: int) -> bytes: return pack('<L', value)[0:-1] @staticmethod def readBool(b: bytes) -> int: return unpack('?', b)[0] @staticmethod def writeBool(b: int) -> bytes: return b'\x01' if b else b'\x00' @staticmethod def readByte(c: bytes) -> int: Binary.checkLength(c, 1) return unpack('>B', c)[0] @staticmethod def 
readSignedByte(c: bytes) -> int: Binary.checkLength(c, 1) return unpack('>b', c)[0] @staticmethod def writeByte(c: int) -> bytes: return pack(">B", c) @staticmethod def readShort(str: bytes) -> int: Binary.checkLength(str, 2) return unpack('>H', str)[0] @staticmethod def readSignedShort(str: bytes) -> int: Binary.checkLength(str, 2) return Binary.signShort(Binary.readShort(str)) @staticmethod def writeShort(value: int) -> bytes: return pack('>H', value) @staticmethod def readLShort(str: bytes) -> int: Binary.checkLength(str, 2) return unpack('<H', str)[0] @staticmethod def readSignedLShort(str: bytes) -> int: Binary.checkLength(str, 2) return Binary.signShort(Binary.readLShort(str)) @staticmethod def writeLShort(value: int) -> bytes: return pack('<H', value) @staticmethod def readInt(str: bytes) -> int: Binary.checkLength(str, 4) return unpack('>L', str)[0] @staticmethod def writeInt(value: int) -> bytes: return pack('>L', value) @staticmethod def readLInt(str: bytes) -> int: Binary.checkLength(str, 4) return unpack('<L', str)[0] @staticmethod def writeLInt(value: int) -> bytes: return pack('<L', value) @staticmethod def readFloat(str: bytes) -> int: Binary.checkLength(str, 4) return unpack('>f', str)[0] @staticmethod def readRoundedFloat(str, accuracy): return round(Binary.readFloat(str), accuracy) @staticmethod def writeFloat(value: int) -> bytes: return pack('>f', value) @staticmethod def readLFloat(str: bytes) -> int: Binary.checkLength(str, 4) return unpack('<f', str)[0] @staticmethod def readRoundedLFloat(str, accuracy): return round(Binary.readLFloat(str), accuracy) @staticmethod def writeLFloat(value: int) -> bytes: return pack('<f', value) @staticmethod def printFloat(value): return match(r"/(\\.\\d+?)0+$/", "" + value).group(1) @staticmethod def readDouble(str: bytes) -> int: Binary.checkLength(str, 8) return unpack('>d', str)[0] @staticmethod def writeDouble(value: int) -> bytes: return pack('>d', value) @staticmethod def readLDouble(str: bytes) -> int: 
Binary.checkLength(str, 8) return unpack('<d', str)[0] @staticmethod def writeLDouble(value: int) -> bytes: return pack('<d', value) @staticmethod def readLong(str: bytes) -> int: Binary.checkLength(str, 8) return unpack('>L', str)[0] @staticmethod def writeLong(value: int) -> bytes: return pack('>L', value) @staticmethod def readLLong(str: bytes) -> int: Binary.checkLength(str, 8) return unpack('<L', str)[0] @staticmethod def writeLLong(value: int) -> bytes: return pack('<L', value) @staticmethod def readUnsignedVarInt(buffer, offset): value = "0"; buffer = str(buffer) i = 0 while i <= 35: i += 7 offset += 1 b = ord(buffer[offset]) value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow("2", str(i)))) if (b & 0x80) == 0: return value elif (len(buffer) - 1) < int(offset): raise TypeError('Expected more bytes, none left to read') raise TypeError('Varint did not terminate after 5 bytes!') @staticmethod def readVarInt(buffer, offset): raw = Binary.readUnsignedVarInt(buffer, offset) temp = bcmath.bcdiv(raw, "2") if bcmath.bcmod(raw, "2") == "1": temp = bcmath.bcsub(bcmath.bcmul(temp, "-1"), "1") return temp @staticmethod def writeUnsignedVarInt(value): buffer = "" value = value & 0xffffffff if bcmath.bccomp(value, "0") == -1: value = bcmath.bcadd(value, "18446744073709551616") i = 0 while i <= 5: i = i + 1 byte = int(bcmath.bcmod(value, "128")) value = bcmath.bcdiv(value, "128") if value != 0: buffer += chr(byte | 0x80) else: buffer += chr(byte) return buffer raise TypeError('Value too large to be encoded as a varint') @staticmethod def writeVarInt(value): value = bcmath.bcmod(bcmath.bcmul(value, "2"), "18446744073709551616") if bcmath.bccomp(value, "0") == -1: value = bcmath.bcsub(bcmath.bcmul(value, "-1"), "1") return Binary.writeUnsignedVarInt(value) @staticmethod def readUnsignedVarLong(buffer, offset): value = "0" buffer = str(buffer) i = 0 while i <= 63: i += 7 offset += 1 b = ord(buffer[offset]) value = bcmath.bcadd(value, bcmath.bcmul(str(b & 
0x7f), bcmath.bcpow("2", str(i)))) if (b & 0x80) == 0: return value elif (len(buffer) - 1) < int(offset): raise TypeError("Expected more bytes, none left to read") raise TypeError("VarLong did not terminate after 10 bytes!") @staticmethod def readVarLong(buffer, offset): raw = Binary.readUnsignedVarLong(buffer, offset) temp = bcmath.bcdiv(raw, "2") if bcmath.bcmod(raw, "2") == "1": temp = bcmath.bcsub(bcmath.bcmul(temp, "-1"), "1") return temp @staticmethod def writeUnsignedVarLong(value): buffer = "" if bcmath.bccomp(value, "0") == -1: value = bcmath.bcadd(value, "18446744073709551616") i = 0 while i <= 10: i = i + 1 byte = int(bcmath.bcmod(value, "128")) value = bcmath.bcdiv(value, "128") if value != 0: buffer += chr(byte | 0x80) else: buffer += chr(byte) return buffer raise TypeError("Value too large to be encoded as a VarLong") @staticmethod def writeVarLong(value): value = bcmath.bcmod(bcmath.bcmul(value, "2"), "18446744073709551616") if bcmath.bccomp(value, "0") == -1: value = bcmath.bcsub(bcmath.bcmul(value, "-1"), "1") return Binary.writeUnsignedVarLong(value) @staticmethod def flipShortEndianness(value): return Binary.readLShort(Binary.writeShort(value)) @staticmethod def flipIntEndianness(value): return Binary.readLInt(Binary.writeInt(value)) @staticmethod def flipLongEndianness(value): return Binary.readLLong(Binary.writeLong(value))
/src/podrum/utils/BinaryStream.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from podrum.utlis.Binary import Binary from podrum.utlis.UUID import UUID class BinaryStream: buffer = "" offset = None def __int__(self, buffer = "", offset = 0): self.buffer = buffer self.offset = offset def reset(self): self.buffer = "" self.offset = 0 def setBuffer(self, buffer = "", offset = 0): self.buffer = buffer self.offset = int(offset) def getOffset(self): return self.offset def getBuffer(self): return self.buffer def get(self, len): if len < 0: self.offset = len(self.buffer) - 1; return "" elif len == True: str = self.buffer[0:self.offset] self.offset = len(self.buffer) return str buffer = self.buffer[self.offset:self.offset+len] self.offset += length return buffer def put(self, str): self.buffer += str def getBool(self): return self.get(1) != b'\x00' def putBool(self, v): self.buffer += (b"\x01" if v else b"\x00") def getByte(self): self.offset += 1 return ord(self.buffer[self.offset]) def putByte(self, v): self.buffer += chr(v) def getLong(self): return Binary.readLong(self.get(8)) def putLong(self, v): self.buffer += Binary.writeLong(v) def getLLong(self): return Binary.readLLong(self.get(8)) def putLLong(self, v): self.buffer += Binary.writeLLong(v) def getInt(self): return Binary.readInt(self.get(4)) def putInt(self, v): self.buffer += Binary.writeInt(v) def getLInt(self): return Binary.readLInt(self.get(4)) def putLInt(self, v): self.buffer += Binary.writeLInt(v) def getShort(self): return Binary.readShort(self.get(2)) def putShort(self, v): self.buffer += Binary.writeShort(v) def getLShort(self): return 
Binary.readLShort(self.get(2)) def putLShort(self, v): self.buffer += Binary.writeLShort(v) def getSignedShort(self): return Binary.readSignedShort(self.get(2)) def getSignedLShort(self): return Binary.readSignedLShort(self.get(4)) def getFloat(self): return Binary.readFloat(self.get(4)) def putFloat(self, v): self.buffer += Binary.writeFloat(v) def getLFloat(self): return Binary.readLFloat(self.get(4)) def putLFloat(self, v): self.buffer += Binary.writeLFloat(v) def getRoundedFloat(self, accuracy): return Binary.readRoundedFloat(self.get(4), accuracy) def getRoundedLFloat(self, accuracy): return Binary.readRoundedLFloat(self.get(4), accuracy) def getTriad(self): return Binary.readTriad(self.get(3)) def putTriad(self, v): self.buffer += Binary.writeTriad(v) def getLTriad(self): return Binary.readLTriad(self.get(3)) def putLTriad(self, v): self.buffer += Binary.writeLTriad(v) def getUnsignedVarInt(self): return Binary.readUnsignedVarInt(self.buffer, self.offset) def putUnsignedVarInt(self, v): self.put(Binary.writeUnsignedVarInt(v)) def getVarInt(self): return Binary.readVarInt(self.buffer, self.offset) def putVarInt(self, v): self.put(Binary.writeVarInt(v)) def getUnsignedVarLong(self): return Binary.readUnsignedVarLong(self.buffer, self.offset) def putUnsignedVarLong(self, v): self.put(Binary.writeUnsignedVarLong(v)) def getVarLong(self): return Binary.readVarLong(self.buffer, self.offset) def putVarLong(self, v): self.put(Binary.writeVarLong(v)) def getString(self): self.get(self.getUnsignedVarInt()) def putString(self, v): self.putUnsignedVarInt(len(v)) self.put(v) def getUUID(self): part1 = self.getLInt() part0 = self.getLInt() part3 = self.getLInt() part2 = self.getLInt() return UUID(part0, part1, part2, part3) def putUUID(self, uuid: UUID): self.putLInt(uuid.getPart(1)) self.putLInt(uuid.getPart(0)) self.putLInt(uuid.getPart(3)) self.putLInt(uuid.getPart(2)) def feof(self): try: self.buffer[self.offset] return True except IndexError: return False
/src/podrum/utils/Config.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ import re import os import json import yaml import pickle from podrum.utils import Logger from podrum.ServerFS.ServerFS import read from podrum.Server import Server class Config: DETECT = -1 PROPERTIES = 0 CNF = PROPERTIES JSON = 1 YAML = 2 EXPORT = 3 SERIALIZED = 4 ENUM = 5 ENUMERATION = ENUM config = [] nestedCache = [] file = '' correct = False type = DETECT is_array = lambda var: isinstance(var, (list, tuple)) formats = [{ "properties" : PROPERTIES, "cnf" : CNF, "conf" : CNF, "config" : CNF, "json" : JSON, "js" : JSON, "yml" : YAML, "yaml" : YAML, "export" : EXPORT, "xport" : EXPORT, "sl" : SERIALIZED, "serialize" : SERIALIZED, "txt" : ENUM, "list" : ENUM, "enum" : ENUM, }] def __init__(self, file, type = DETECT, default = [], correct=None): self.load(file, type, default) correct = self.correct @staticmethod def isset(self, variable): return variable in locals() or variable in globals() def reload(self): self.config = [] self.nestedCache = [] self.correct = False self.load(self.file, self.type) @staticmethod def fixYAMLIndexes(str): return re.sub(r"#^([ ]*)([a-zA-Z_]{1}[ ]*)\\:$#m", r"$1\"$2\":", str) def load(self, file, type=DETECT, default = []): self.correct = True self.type = int(type) self.file = file if not self.is_array(default): default = [] if not os.path.exists(file): self.config = default self.save() else: if self.type == self.DETECT: bname = os.path.basename(self.file) extension = bname.split(".") arrlist = extension.pop() extension = arrlist.strip().lower() if self.isset(self.formats[extension]): self.type = 
self.formats[extension] else: self.correct = False if self.correct: content = open(self.file).read() if (self.type == self.PROPERTIES) and (self.type == self.CNF): self.parseProperties(content) elif self.type == self.JSON: self.config = json.loads(content) elif self.type == self.YAML: content = self.fixYAMLIndexes(content) self.config = yaml.load(content) elif self.type == self.SERIALIZED: self.config = pickle.loads(content) elif self.type == self.ENUM: self.parseList(content) else: self.correct = False return False if not self.is_array(self.config): # Is array doesn't exist self.config = default if self.fillDefaults(default, self.config) > 0: self.save() else: return False return True def check(): return correct = True def save(): if self.correct == True: try: content = None if (type == PROPERTIES) or (type == CNF): content = writeProperties() elif type == JSON: content = json.dumps(config) elif type == YAML: content = yaml.emit(config) elif type == SERIALIZED: content = pickle.dumps(self.config) elif type == ENUM: "\r\n".join(config.keys()) else: correct = False return False except ValueError: logger.log('error', f'Could not save Config {self.file}') return True else: return false
/src/podrum/utils/Logger.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ from datetime import datetime from podrum.utils.TextFormat import TextFormat TextFormat = TextFormat() class Logger: def log(type_, content): time = datetime.now() if type_ == 'info': print(f'{TextFormat.BLUE}[INFO: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == 'warn': print(f'{TextFormat.YELLOW}[WARNING: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == 'error': print(f'{TextFormat.RED}[ERROR: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == 'success': print(f'{TextFormat.GREEN}[SUCCESS: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == "emergency": print(f'{TextFormat.GOLD}[EMERGENCY: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == "alert": print(f'{TextFormat.PURPLE}[ALERT: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == "notice": print(f'{TextFormat.AQUA}[NOTICE: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == "critical": print(f'{TextFormat.RED}[CRITICAL: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') elif type_ == "debug": print(f'{TTextFormat.GRAY}[DEBUG: {time.strftime("%H:%M")}]{TextFormat.WHITE} {content}') else: print(f'[{type_.upper()}: {time.strftime("%H:%M")}]{content}')
/src/podrum/utils/UUID.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ import hashlib import os import random import time from podrum.utils.Binary import Binary from podrum.utils.Utils import Utils class UUID: parts = [0, 0, 0, 0] version = None def __init__(self, part1 = 0, part2 = 0, part3 = 0, part4 = 0, version = None): self.parts[0] = int(part1) self.parts[1] = int(part2) self.parts[2] = int(part3) self.parts[3] = int(part4) self.version = (self.parts[1] & 0xf000) >> 12 if version == None else int(version) def getVersion(self): return self.version def equals(self, uuid: UUID): return uuid.parts[0] == self.parts[0] and uuid.parts[1] == self.parts[1] and uuid.parts[2] == self.parts[2] and uuid.parts[3] == self.parts[3] def fromBinary(self, uuid, version = None): if len(uuid) != 16: raise Exception("Must have exactly 16 bytes") return UUID(Binary.readInt(Utils.substr(uuid, 0, 4)), Binary.readInt(Utils.substr(uuid, 4, 4)), Binary.readInt(Utils.substr(uuid, 8, 4)), Binary.readInt(Utils.substr(uuid, 12, 4)), version) def fromString(self, uuid, version = None): return self.fromBinary(Utils.hex2bin(uuid.strip().replace("-", "")), version) def fromData(self, data): hash = hashlib.new("md5").update("".join(data)) return self.fromBinary(hash, 3) def fromRandom(self): return self.fromData(Binary.writeInt(int(time.time())), Binary.writeShort(os.getpid()), Binary.writeShort(os.geteuid()), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff)), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff))) def toBinary(self): return Binary.writeInt(self.parts[0]) + Binary.writeInt(self.parts[1]) + Binary.writeInt(self.parts[2]) 
+ Binary.writeInt(self.parts[3]) def toString(self): hex = Utils.bin2hex(self.toBinary()) if self.version != None: return Utils.substr(hex, 0, 8) + "-" + Utils.substr(hex, 8, 4) + "-" + int(self.version, 16) + Utils.substr(hex, 13, 3) + "-8" + Utils.substr(hex, 17, 3) + "-" + Utils.substr(hex, 20, 12) return Utils.substr(hex, 0, 8) + "-" + Utils.substr(hex, 8, 4) + "-" + Utils.substr(hex, 12, 4) + "-" + Utils.substr(hex, 16, 4) + "-" + Utils.substr(hex, 20, 12) def getPart(self, partNumber: int): if partNumber < 0 or partNumber > 3: raise Exception("Invalid UUID part index" + str(partNumber)) return self.parts[partNumber]
/src/podrum/utils/Utils.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ import base64 import binascii import json import os import signal import sys import socket import time import urllib import hmac import hashlib class Utils: def getOS(): if sys.platform == 'linux' or sys.platform == 'linux2': return 'linux' elif sys.platform == 'darwin': return 'osx' elif sys.platform == 'win32' or sys.platform == 'win64': return 'windows' def killServer(): os.kill(os.getpid(), signal.SIGTERM) def getPrivateIpAddress(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) ip = s.getsockname()[0] return ip def getPublicIpAddress(): ip = urllib.request.urlopen('https://ident.me').read().decode('utf8') return ip def microtime(get_as_float = False) : if get_as_float: return time.time() else: return '%f %d' % math.modf(time.time()) def substr(string, start, length = None): if start < 0: start = start + len(string) if not length: return string[start:] elif length > 0: return string[start:start + length] else: return string[start:length] def hex2bin(hexdec): if hexdec == 'x': return False if hexdec == '': return False dec = int(hexdec, 16) b = binascii.unhexlify('%x' % dec) return b def binToHex(b): return binascii.hexlify(b) def HMACSHA256(data, secret): encodedData = data.encode() byteSecret = secret.encode() return hmac.new(byteSecret, encodedData, hashlib.sha256).hexdigest().upper() def base64UrlEncode(data): return base64.urlsafe_b64encode(data.encode()).replace(b"=", b"").decode() def base64UrlDecode(data): return base64.urlsafe_b64decode(data).decode() def encodeJWT(header, payload, secret): body = 
Utils.base64UrlEncode(json.dumps(header)) + "." + Utils.base64UrlEncode(json.dumps(payload)) secret = Utils.HMACSHA256(body, secret) return body + "." + Utils.base64UrlEncode(secret) def decodeJWT(token: str): [headB64, payloadB64, sigB64] = token.split(".") rawPayloadJSON = Utils.base64UrlDecode(payloadB64) if rawPayloadJSON == False: raise Exception("Payload base64 is invalid and cannot be decoded") decodedPayload = json.loads(rawPayloadJSON) if isinstance(decodedPayload, str): decodedPayload = json.loads(decodedPayload) if not isinstance(decodedPayload, dict): raise Exception("Decoded payload should be dict, " + str(type(decodedPayload).__name__) + " received") return decodedPayload
/src/podrum/utils/bcmath.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ import decimal class bcmath: @staticmethod def bcmul(num1, num2, scale=None): if scale != None: decimal.getcontext().prec = scale result = decimal.Decimal(num1) * decimal.Decimal(num2) return int(result) @staticmethod def bcdiv(num1, num2, scale=None): if scale != None: decimal.getcontext().prec = scale result = decimal.Decimal(num1) / decimal.Decimal(num2) return int(result) @staticmethod def bcadd(num1, num2, scale=None): if scale != None: decimal.getcontext().prec = scale result = decimal.Decimal(num1) + decimal.Decimal(num2) return int(result) @staticmethod def bcsub(num1, num2, scale=None): if scale != None: decimal.getcontext().prec = scale result = decimal.Decimal(num1) - decimal.Decimal(num2) return int(result) @staticmethod def bccomp(num1, num2): result = (int(num1) > int(num2)) - (int(num1) < int(num2)) return int(result) @staticmethod def bcmod(num1, num2): result = int(num1) % int(num2) return int(result) @staticmethod def bcpow(num1, num2): result = int(num1) ** int(num2) return int(result) @staticmethod def bcpowmod(num1, num2, mod): result = pow(num1, num2, mod) return int(result) @staticmethod def bcscale(scale): result = decimal.getcontext().prec = scale return int(result) @staticmethod def bcsqrt(num): result = math.sqrt(num) return int(result)
/src/podrum/wizard/Parser.py
""" * ____ _ * | _ \ ___ __| |_ __ _ _ _ __ ___ * | |_) / _ \ / _` | '__| | | | '_ ` _ \ * | __/ (_) | (_| | | | |_| | | | | | | * |_| \___/ \__,_|_| \__,_|_| |_| |_| * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. """ import os from podrum.lang import Base class Parser: def checkYesNo(str): str = str.lower() if str == 'y' or str == 'yes': return True elif str == 'n' or str == 'no': return False else: return def checkIfLangExists(str): path = os.getcwd() + '/src/podrum/lang/' allLangs = Base.Base.getLangNames(path) if(str in allLangs): return True else: return False
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
ddward/ansible
refs/heads/master
{"/app.py": ["/db.py", "/user.py", "/build_dir.py"], "/user.py": ["/db.py"]}
└── ├── app.py ├── build_dir.py ├── db.py ├── penetrationTesting.py ├── sanitize_path.py └── user.py
/app.py
"""Ansible cloud-drive Flask application: auth pages plus a file browser."""
from cryptography.fernet import Fernet
import datetime
from flask import (flash, Flask, g, Markup, redirect, render_template, request,
                   send_from_directory, session, url_for)
import functools
import logging
import os
from secrets import token_urlsafe
import sqlite3
import sys
from werkzeug.utils import secure_filename
from werkzeug.security import check_password_hash, generate_password_hash
from build_dir import build_dir
import sanitize_path
from db import get_db
from user import create_user, user_exists, gen_default_user, get_user, update_user
import html

logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)

app = Flask(__name__)
app.config["SECRET_KEY"] = os.urandom(256)  # TODO: change to environment variable
# TODO: persist — regenerating the key on restart invalidates old ciphertext.
app.config["CRYPTO_KEY"] = Fernet.generate_key()

path = os.getcwd()
database = os.path.join(path, 'ansible.db')
db = get_db(app)


def login_required(view):
    """Decorator: redirect to the login page unless the session is authenticated."""
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if 'authenticated' not in session:
            return redirect(url_for('login'))
        return view(**kwargs)
    return wrapped_view


@app.route('/', defaults={'loc': ""}, methods=('GET',))
@app.route('/<path:loc>', methods=('GET',))
@login_required
def ansible(loc):
    """Serve a directory listing for *loc*, or the file itself when *loc*
    has a file extension."""
    # BUG FIX: sanitize() RETURNS the cleaned path — the return value was
    # previously discarded, so traversal sequences passed through unchecked.
    loc = sanitize_path.sanitize(loc)
    logging.debug(loc)
    currentDir = os.path.join('cloud-drive', loc)  # update to be malleable for sharing
    currentPath = os.path.join(path, currentDir)
    fileExtension = os.path.splitext(currentPath)[1]
    logging.debug(fileExtension)
    logging.debug(currentDir)
    logging.debug(currentPath)
    if fileExtension:
        # A file was requested: split off the final path component and serve it.
        localDir, filename = currentPath.rsplit('/', 1)
        # NOTE(review): localDir is already absolute, so os.path.join simply
        # returns it unchanged — kept for behavioural fidelity.
        absPath = os.path.join(path, 'cloud-drive', localDir)
        return send_from_directory(directory=absPath, filename=filename)
    directoryDict = build_dir(currentPath)
    return render_template('index-alt.html', directory=directoryDict, curDir=loc)


@app.route("/login", methods=('GET', 'POST'))
def login():
    """Validate credentials and establish an authenticated session."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        error = None
        user = get_user(username)
        if user is not None:
            user_password = user[1]
            if not check_password_hash(user_password, password):
                error = 'Incorrect password, please try again.'
        else:
            error = 'User not found'
        if error is None:
            session.clear()
            session['authenticated'] = 'true'
            session['user_id'] = token_urlsafe()
            return redirect(url_for('ansible'))
        flash(error)
    return render_template('login.html')


@app.route("/signup", methods=('GET', 'POST'))
def signup():
    """Create a new account if the username is free."""
    if request.method == 'POST':
        username = request.form['name']
        password = request.form['password']
        error = None
        if not user_exists(username):
            create_user(username, password)
        else:
            error = 'Username already exists.'
        if error is None:
            return redirect(url_for('login'))
        flash(error)
    return render_template('signup.html')


@app.route("/updatepassword", methods=('GET', 'POST'))
def update_password():
    """Change a user's password after re-entering the old one."""
    if request.method == 'POST':
        username = request.form['username']
        prev_password = request.form['password']
        new_password = request.form['new_password']
        verified_new_password = request.form['verify_new_password']
        error = None
        if new_password == verified_new_password:
            if user_exists(username):
                # NOTE(review): update_user silently does nothing when the old
                # password is wrong — the user is still redirected to login.
                update_user(username, prev_password, new_password)
            else:
                error = 'User doesnt exist.'
        else:
            error = 'Passwords do not match'
        if error is None:
            return redirect(url_for('login'))
        flash(error)
    return render_template('update-password.html')


@app.route("/logout", methods=('GET',))
def logout():
    """Drop the authenticated flag and go back to the login page."""
    # BUG FIX: ``del session['authenticated']`` raised KeyError when the
    # route was hit without an active session.
    session.pop('authenticated', None)
    return redirect(url_for('login'))
/build_dir.py
# build_dir.py
import os


def build_dir(curPath):
    """Map each non-hidden entry of *curPath* to {"is_dir": bool, "size": int}.

    Hidden files and shortcuts (names starting with '.') are skipped.
    stat dict reference: https://docs.python.org/2/library/stat.html
    """
    listing = {}
    with os.scandir(curPath) as entries:
        for entry in entries:
            if entry.name.startswith('.'):
                continue  # hidden file / shortcut
            stats = entry.stat()
            listing[entry.name] = {"is_dir": entry.is_dir(),
                                   "size": stats.st_size}
    return listing
/db.py
"""SQLite helpers for the ansible app.

Table and column names are always supplied by trusted application code;
row VALUES are always bound as parameters.
"""
from getpass import getpass
import os
import sqlite3
from werkzeug.security import generate_password_hash
from flask import g
import traceback
import logging

# Database file lives in the directory the app was started from.
path = os.getcwd()
DATABASE = os.path.join(path, 'ansible.db')


def init_db(app=None):
    """Create the schema by executing schema.sql.

    BUG FIX: the function referenced a global ``app`` that this module never
    defines (guaranteed NameError); the Flask app is now passed in explicitly.
    """
    if app is None:
        raise ValueError("init_db() requires the Flask app instance")
    with app.app_context():
        db = sqlite3.connect(DATABASE)
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
        db.close()


def get_db(app):
    """Return (and cache on flask.g) the application sqlite connection."""
    with app.app_context():
        if 'db' not in g:
            g.db = sqlite3.connect(
                DATABASE,
                detect_types=sqlite3.PARSE_DECLTYPES
            )
            g.db.row_factory = sqlite3.Row
        return g.db


def insert(table, columnTuple, valueTuple):
    """Insert one row; values are bound as parameters."""
    dbConnection = None
    try:
        dbConnection = sqlite3.connect(DATABASE)
        columnTupleString = ', '.join(columnTuple)
        # BUG FIX: the placeholder list was hard-coded to '(?, ?)', which
        # only worked for exactly two columns.
        placeholders = ', '.join(['?'] * len(columnTuple))
        dbConnection.execute(
            'INSERT INTO ' + table + ' (' + columnTupleString + ') VALUES (' + placeholders + ')',
            (valueTuple)
        )
        dbConnection.commit()
    except Exception as e:
        logging.error(traceback.format_exc())
    finally:
        if dbConnection is not None:
            dbConnection.close()  # BUG FIX: connections were never closed


def select_one(table, return_columns, query_column, value):
    """Fetch the first row where query_column == value, or None."""
    dbConnection = None
    try:
        dbConnection = sqlite3.connect(DATABASE)
        result = (dbConnection.execute(
            'SELECT ' + ', '.join(return_columns) + ' FROM ' + table +
            ' WHERE ' + query_column + '= (?) Limit 1',
            (value,)
        ).fetchone())
        return result
    except Exception as e:
        logging.error(traceback.format_exc())
        # BUG FIX: message was copy-pasted from the exists() helper.
        print("select_one query failed")
    finally:
        if dbConnection is not None:
            dbConnection.close()


def exists(table, column, value):
    """True when at least one row has column == value."""
    dbConnection = None
    try:
        dbConnection = sqlite3.connect(DATABASE)
        result = dbConnection.execute(
            'SELECT CASE WHEN EXISTS( SELECT 1 FROM ' + table +
            ' WHERE ' + column + '= (?)) THEN 1 ELSE 0 END',
            (value,)
        ).fetchone()
        return result[0] == 1
    except Exception as e:
        logging.error(traceback.format_exc())
    finally:
        if dbConnection is not None:
            dbConnection.close()


def update(table, update_dict, query_column, query_value):
    """Update the columns in *update_dict* for rows matching the query.

    SECURITY FIX: the SET clause previously interpolated the new VALUES
    directly into the SQL string (via build_set_statement), allowing SQL
    injection; values are now bound as parameters.
    """
    dbConnection = None
    try:
        dbConnection = sqlite3.connect(DATABASE)
        columns = list(update_dict)
        assignments = ', '.join(col + ' = ?' for col in columns)
        params = [update_dict[col] for col in columns] + [query_value]
        result = (dbConnection.execute(
            'UPDATE ' + table + ' SET ' + assignments +
            ' WHERE ' + query_column + '= (?)',
            params
        ).fetchone())
        dbConnection.commit()
        return result
    except Exception as e:
        logging.error(traceback.format_exc())
    finally:
        if dbConnection is not None:
            dbConnection.close()


def build_set_statement(updated_field_dict):
    """Legacy: render a SET clause with values inlined as quoted strings.

    Kept for backward compatibility only — do NOT use with untrusted input
    (values are interpolated, not parameterized).
    """
    setItems = []
    for field in updated_field_dict:
        setItems.append(field + ' = \'' + updated_field_dict[field] + '\'')
    setFields = ', '.join(setItems)
    return setFields
/penetrationTesting.py
"""Directory-traversal penetration test driver for the ansible app.

Sends each attack string appended to the base URL and compares the response
against a known-safe request for the top directory.
"""
from bs4 import BeautifulSoup
import getpass
import requests
import os


def pTest(attack_string, attack_url, password):
    """Log in, request attack_url + attack_string, and report the outcome."""
    payload = {'password': password}
    with requests.Session() as s:
        p = s.post(attack_url + 'login', data=payload)
        # Build the attack request by hand so the traversal sequence is not
        # normalised away by the URL parser.
        r = requests.Request('GET', attack_url)
        prepared = s.prepare_request(r)
        prepared.url += attack_string
        response = s.send(prepared)
        print('Sending request with url:', prepared.url)
        if response.ok:
            soup = BeautifulSoup(response.text, 'html.parser')
            # Baseline: the legitimate top-directory listing.
            safeResponse = s.get(attack_url)
            soup2 = BeautifulSoup(safeResponse.text, 'html.parser')
            if response.text == safeResponse.text:
                print("Attack Failed - Attack Led to Top Directory")
            else:
                print("Attack may have succeded")
                print("Attack response tags:")
                for link in soup.find_all('a'):
                    print(link.get('href'))
                print('')
                print('Safe Output')
                print('')
                for link in soup2.find_all('a'):
                    print(link.get('href'))
        else:
            print('Attack Failed - No Such Directory')


def pWrap(attack_string):
    """Run pTest against the module-level target with the env password."""
    pTest(attack_string=attack_string, attack_url=ATTACK_URL, password=PASSWORD)


PASSWORD = os.getenv('PWRD')
ATTACK_URL = 'http://127.0.0.1:5050/'
ATTACK_STRINGS = [
    '../../../..',
    'test/../.././.../',
    '..',
    'level1/../..',
    'level1/../../',
    'pwd'
]

if __name__ == '__main__':
    if not PASSWORD:
        # BUG FIX: the message was assigned back into PASSWORD via
        # ``PASSWORD = print(...)`` (print returns None) — just print it.
        print('First set environment variable PWRD. (export PWRD=YOUR_PASSWORD)')
    else:
        for attack in ATTACK_STRINGS:
            pWrap(attack)
/sanitize_path.py
import re


def sanitize(path):
    """Strip directory-traversal tokens from a user-supplied path.

    Removes every ".." sequence, collapses runs of slashes to a single
    slash, then trims leading slashes and dots so the result is relative.
    """
    # Drop the traversal dots first (left-to-right, non-overlapping).
    cleaned = path.replace('..', '')
    # Collapse any run of slashes down to one.
    while '//' in cleaned:
        cleaned = cleaned.replace('//', '/')
    # Finally shave off any leading '/' or '.' characters.
    return cleaned.lstrip('/.')
/user.py
"""User account management on top of the db helpers."""
from db import insert, exists, select_one, update
from werkzeug.security import check_password_hash, generate_password_hash
# BUG FIX: gen_default_user() called getpass() without importing it.
from getpass import getpass
import logging
import traceback


def create_user(username, password):
    """Insert a new user row with a hashed password."""
    try:
        formattedUsername = format_username(username)
        hashedPassword = generate_password_hash(password)
        insert(
            'user',
            ('username', 'password'),
            (formattedUsername, hashedPassword))
    except Exception:
        logging.error(traceback.format_exc())


def user_exists(username):
    """True when a row exists for the (case-folded) username."""
    try:
        formattedUsername = format_username(username)
        return exists('user', 'username', formattedUsername)
    except Exception:
        logging.error(traceback.format_exc())
        print("User existence check failed")


def get_user(username):
    """Return the (username, password-hash) row, or None."""
    try:
        formattedUsername = format_username(username)
        return select_one('user', ('username', 'password'),
                          'username', formattedUsername)
    except Exception:
        logging.error(traceback.format_exc())
        print("Failed to get user")


def update_user(username, password, new_password):
    """Change the password after verifying the current one."""
    try:
        formattedUsername = format_username(username)
        user = get_user(formattedUsername)
        # BUG FIX: the row was indexed (user[1]) BEFORE the None check,
        # crashing for unknown usernames.
        if user is not None:
            if check_password_hash(user[1], password):
                newHashedPassword = generate_password_hash(new_password)
                update('user', {'password': newHashedPassword},
                       'username', formattedUsername)
    except Exception:  # BUG FIX: was a bare except
        logging.error(traceback.format_exc())


def gen_default_user():
    """Interactively prompt for a password and create the 'default' user."""
    while True:
        password = getpass(prompt='Create a password, at least 8 characters: ')
        password2 = getpass(prompt='Confirm password: ')
        if password == password2:
            if len(password) < 8:
                print('Password must be at least 8 characters.')
            else:
                break
        else:
            print('Passwords do not match')
    try:
        create_user('default', password)
    except Exception:  # BUG FIX: was a bare except
        logging.error(traceback.format_exc())


def format_username(username):
    """Usernames are case-insensitive: store and query lower-cased."""
    return username.lower()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
MatheusLealAquino/meuCanal
refs/heads/master
{"/conteudo/forms.py": ["/conteudo/models.py"], "/conteudo/views.py": ["/conteudo/models.py"]}
└── ├── conteudo │ ├── forms.py │ ├── models.py │ ├── urls.py │ └── views.py ├── login │ ├── urls.py │ └── views.py └── projeto └── views.py
/conteudo/forms.py
from django import forms

from conteudo.models import Video, Categoria


class VideoForm(forms.ModelForm):
    """Create/edit form for Video, styled with Bootstrap form-control classes."""

    error_messages = {
        'campo invalido': "Campo inválido"
    }

    class Meta:
        model = Video
        fields = ('video_id', 'categoria', 'nome', 'url', 'capa',
                  'visualizacao', 'nota', 'sinopse')

    # Hidden primary-key carrier used when editing an existing video.
    video_id = forms.CharField(widget=forms.HiddenInput(), required=False)

    categoria = forms.ModelChoiceField(
        error_messages={'required': 'Campo obrigatório', },
        # BUG FIX: order_by(id) passed the *builtin* id function; the model
        # field name must be given as a string.
        queryset=Categoria.objects.all().order_by('id'),
        empty_label='--- Selecionar a Categoria ---',
        widget=forms.Select(attrs={'class': 'form-control form-control-sm'}),
        required=True
    )

    nome = forms.CharField(
        # BUG FIX: this was a set literal {'required', '...'} — a comma where
        # a colon belongs — so Django never saw the custom message.
        error_messages={'required': 'Campo obrigatório'},
        widget=forms.TextInput(attrs={'class': 'form-control form-control-sm',
                                      'maxlength': '120'}),
        required=True
    )
/conteudo/models.py
from django.db import models


class Categoria(models.Model):
    """Video category; also exposes a small preview of its newest videos."""

    nome = models.CharField(max_length=255, db_index=True)
    slug = models.SlugField(max_length=200)

    class Meta:
        ordering = ('nome',)
        verbose_name = 'categoria'
        verbose_name_plural = 'categorias'

    def __str__(self):
        return self.nome

    def videosCategoria(self):
        """The four most recently added videos in this category."""
        return Video.objects.all().filter(categoria_id=self.id).order_by('-id')[:4]


class Video(models.Model):
    """An uploaded video with its cover image and presentation metadata."""

    categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
    nome = models.CharField(max_length=255)
    url = models.FileField(upload_to='conteudo/videos/')
    capa = models.FileField(upload_to='conteudo/images/')
    visualizacao = models.DecimalField(max_digits=10, decimal_places=1, default=0)
    # NOTE(review): max_length is presumably ignored by FloatField — kept so
    # the field's deconstruction (migrations) stays byte-identical; confirm.
    nota = models.FloatField(max_length=20)
    sinopse = models.CharField(max_length=500)

    class Meta:
        ordering = ('nome',)
        verbose_name = 'video'
        verbose_name_plural = 'videos'

    def __str__(self):
        return self.nome
/conteudo/urls.py
from django.urls import path

from conteudo import views

app_name = 'conteudo'

# NOTE: '<int:id>/' only matches integers, so the literal 'categoria/'
# routes below never collide with the video-detail route.
urlpatterns = [
    path('', views.exibir_catalogo, name='catalogo'),
    path('cadastro_video/', views.cadastro_video, name='cadastro_video'),
    path('editar_video/<int:id>/', views.editar_video, name='editar_video'),
    path('<int:id>/', views.exibir_video, name='exibir_video'),
    path('categoria/', views.lista_categoria, name='listar_todas_categorias'),
    path('categoria/<int:id>/', views.lista_categoria, name='lista_categoria'),
]
/conteudo/views.py
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render, redirect, get_object_or_404

from conteudo.models import Video, Categoria


def exibir_catalogo(request):
    """Landing page: every category with its recent videos."""
    categorias = Categoria.objects.all()
    return render(request, 'conteudo/catalogo_videos.html', {'categorias': categorias})


def cadastro_video(request):
    """Video registration page."""
    return render(request, 'conteudo/cadastro_video.html')


def editar_video(request, id=None):
    """Video edit page.

    BUG FIX: the URLconf routes editar_video/<int:id>/ here, but the view
    did not accept the ``id`` kwarg, raising TypeError on every request.
    The parameter defaults to None so any existing direct call still works.
    """
    return render(request, 'conteudo/editar_video.html')


def lista_categoria(request, id=None):
    """Paginated video list, optionally filtered by category ``id``."""
    categorias = Categoria.objects.all()
    if id is not None:
        videos_lista = Video.objects.all().filter(categoria_id=id)
    else:
        videos_lista = Video.objects.all()
    paginator = Paginator(videos_lista, 3)  # three videos per page
    page = request.GET.get('page', 1)
    try:
        videos = paginator.page(page)
    except PageNotAnInteger:
        videos = paginator.page(1)  # non-numeric page -> first page
    except EmptyPage:
        videos = paginator.page(paginator.num_pages)  # past the end -> last page
    return render(request, 'conteudo/lista_categoria.html',
                  {'categorias': categorias, 'videos': videos})


def exibir_video(request, id):
    """Player page for one video (404 when it does not exist)."""
    video = get_object_or_404(Video, id=id)
    categorias = Categoria.objects.all()
    return render(request, 'conteudo/player_video.html',
                  {'video': video, 'categorias': categorias})
/login/urls.py
from django.urls import path

from login import views

app_name = 'login'

urlpatterns = [
    # Root of the login app: the login page itself.
    path('', views.pagina_login, name='pagina_login'),
]
/login/views.py
from django.shortcuts import render


def pagina_login(request):
    """Render the static login page."""
    template = 'login/pagina_login.html'
    return render(request, template)
/projeto/views.py
from django.shortcuts import render


def pagina_inicial(request):
    """Render the site's landing page."""
    template = 'index.html'
    return render(request, template)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
paolapilar/juegos
refs/heads/master
{"/main.py": ["/snake.py", "/collectables.py"], "/world.py": ["/snake.py", "/base.py", "/collectables.py"]}
└── ├── base.py ├── collectables.py ├── main.py ├── screen.py ├── snake.py ├── utils.py └── world.py
/base.py
import math

import utils


class Entity(object):
    """A grid-aligned rectangular entity with cached screen-space geometry.

    (i, j) is the grid cell, (di, dj) the size in cells; the top-left corner
    comes from utils.grid2screen and the centre is cached until update().
    """

    def __init__(self, i, j, di, dj, cellSize, canvasWidth, canvasHeight):
        super(Entity, self).__init__()
        self.i = i
        self.j = j
        self._cellSize = cellSize
        self._canvasWidth = canvasWidth
        self._canvasHeight = canvasHeight
        self._di = di
        self._dj = dj
        self._x, self._y = utils.grid2screen(i, j, cellSize,
                                             canvasWidth, canvasHeight)
        self._w = di * cellSize
        self._h = dj * cellSize
        self._recenter()

    @staticmethod
    def _half_span(cells):
        """Offset (in cells) from the corner to the centre for *cells* cells."""
        # NOTE(review): the odd-count branch yields 0.0 — preserved exactly
        # from the original ternary; confirm this is the intended geometry.
        if cells % 2 == 0:
            return math.floor((cells - 1) / 2.) + 0.5
        return 0.0

    def _recenter(self):
        """Recompute the cached centre from the current top-left corner."""
        self._xc = self._x + self._cellSize * self._half_span(self._di)
        self._yc = self._y + self._cellSize * self._half_span(self._dj)

    def x(self):
        return self._x

    def y(self):
        return self._y

    def xc(self):
        return self._xc

    def yc(self):
        return self._yc

    def w(self):
        return self._w

    def h(self):
        return self._h

    def update(self):
        """Refresh screen position and centre from the current grid cell."""
        self._x, self._y = utils.grid2screen(self.i, self.j, self._cellSize,
                                             self._canvasWidth,
                                             self._canvasHeight)
        self._recenter()

    def hit(self, other):
        """Axis-aligned bounding-box overlap test against *other*."""
        dx = abs(self._xc - other.xc())
        dy = abs(self._yc - other.yc())
        return (dx < self._w / 2. + other.w() / 2.
                and dy < self._h / 2. + other.h() / 2.)
/collectables.py
import pygame

import base


class Apple(base.Entity):
    """A collectible: a yellow 1x1-cell square."""

    def __init__(self, i, j, cellSize, canvasWidth, canvasHeight):
        super(Apple, self).__init__(i, j, 1, 1, cellSize,
                                    canvasWidth, canvasHeight)
        self._color = (255, 255, 0)
        self._alive = True

    def draw(self, canvas):
        """Draw the apple as a rect centred on its cached position."""
        left = self._x - 0.5 * self._cellSize
        top = self._y - 0.5 * self._cellSize
        pygame.draw.rect(canvas, self._color, (left, top, self._w, self._h))
/main.py
import pygame
import random
import time

from snake import Snake
from collectables import Apple
import screen


class Game:
    """Top-level game object: owns the window, input state and active screen."""

    def __init__(self):
        pygame.init()
        self._canvasWidth = 800
        self._canvasHeight = 600
        self._canvas = pygame.display.set_mode((self._canvasWidth,
                                                self._canvasHeight))
        self._gameExit = False
        self._keys = {name: False for name in
                      ('up', 'down', 'right', 'left', 'enter', 'escape')}
        self._screen = screen.MenuScreen(self._canvas)
        self._screenName = 'menu'
        # pygame key code -> name used in self._keys.
        self._keymap = {
            pygame.K_UP: 'up',
            pygame.K_DOWN: 'down',
            pygame.K_RIGHT: 'right',
            pygame.K_LEFT: 'left',
            pygame.K_RETURN: 'enter',
            pygame.K_ESCAPE: 'escape',
        }

    def _getEvents(self):
        """Poll pygame events; track quit plus pressed state of known keys."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self._gameExit = True
            elif event.type in (pygame.KEYDOWN, pygame.KEYUP):
                name = self._keymap.get(event.key)
                if name is not None:
                    self._keys[name] = (event.type == pygame.KEYDOWN)

    def _switch_to_game(self):
        """Enter (or restart) the gameplay screen."""
        self._screen = screen.GameScreen(self._canvas, self._canvasWidth,
                                         self._canvasHeight)
        self._screenName = 'game'

    def _switch_to_menu(self):
        """Return to the start menu."""
        self._screen = screen.MenuScreen(self._canvas)
        self._screenName = 'menu'

    def _updateScreen(self):
        """Advance the active screen and handle screen transitions."""
        self._screen.setKeys(self._keys)
        self._screen.update()
        self._screen.draw()
        if self._screenName == 'menu' and self._keys['enter']:
            self._switch_to_game()
        elif self._screenName == 'game' and self._screen.lose():
            self._screen = screen.GameOverScreen(self._canvas)
            self._screenName = 'gameover'
        elif self._screenName == 'game' and self._screen.win():
            self._switch_to_menu()
        elif self._screenName == 'gameover' and self._keys['enter']:
            self._switch_to_game()
        elif self._screenName == 'gameover' and self._keys['escape']:
            self._switch_to_menu()

    def run(self):
        """Main loop: events, screen update, display flip, short sleep."""
        while not self._gameExit:
            self._getEvents()
            self._updateScreen()
            # actualizar el canvas -> refresh the display
            pygame.display.update()
            # esperar un ratito -> brief pause to yield the CPU
            time.sleep(0.001)


if __name__ == '__main__':
    _game = Game()
    _game.run()
/screen.py
import pygame

import world


class Text(object):
    """A pre-rendered piece of text centered at (x, y)."""

    def __init__(self, x, y, message, size, color):
        super(Text, self).__init__()
        self._message = message
        self._textFont = pygame.font.Font(None, size)
        self._textSurface = self._textFont.render(message, True, color)
        self._textRect = self._textSurface.get_rect()
        self._textRect.center = (x, y)

    def draw(self, canvas):
        """Blit the cached text surface onto *canvas*."""
        canvas.blit(self._textSurface, self._textRect)


class Screen(object):
    """Base screen: a solid background plus a list of Text labels.

    Subclasses override update()/draw() for screen-specific behaviour."""

    def __init__(self, canvas, backgroundColor):
        super(Screen, self).__init__()
        self._canvas = canvas
        self._backgroundColor = backgroundColor
        self._texts = []
        self._keys = None

    def setKeys(self, keys):
        # Shared key-state dict injected by the Game loop each frame.
        self._keys = keys

    def addText(self, text):
        self._texts.append(text)

    def draw(self):
        self._canvas.fill(self._backgroundColor)
        # iterate the labels directly instead of indexing via range(len(...))
        for text in self._texts:
            text.draw(self._canvas)

    def update(self):
        pass


class MenuScreen(Screen):
    """Title screen shown before the game starts."""

    def __init__(self, canvas):
        super(MenuScreen, self).__init__(canvas, (255, 255, 0))
        self._textTitle = Text(100, 100, 'SNAKE', 50, (0, 0, 0))
        self._textPlay = Text(100, 400, 'PLAY', 40, (255, 255, 255))
        self.addText(self._textTitle)
        self.addText(self._textPlay)


class GameOverScreen(Screen):
    """Screen shown after the snake dies with no lives left."""

    def __init__(self, canvas):
        super(GameOverScreen, self).__init__(canvas, (0, 0, 0))
        self._textGameOver = Text(100, 100, 'GAME OVER :(', 50, (255, 0, 255))
        self._textContinue = Text(100, 400, 'Continue???', 40, (255, 255, 255))
        self.addText(self._textGameOver)
        self.addText(self._textContinue)


class GameScreen(Screen):
    """Active gameplay screen; delegates all logic to the World."""

    def __init__(self, canvas, canvasWidth, canvasHeight):
        super(GameScreen, self).__init__(canvas, (255, 255, 255))
        self._world = world.World(40, canvasWidth, canvasHeight)

    def draw(self):
        super(GameScreen, self).draw()
        self._world.draw(self._canvas)

    def update(self):
        self._world.setKeys(self._keys)
        self._world.update()

    def lose(self):
        return self._world.lose()

    def win(self):
        return self._world.win()
/snake.py
import pygame

import base
from collections import deque


class SnakePart(base.Entity):
    """One grid cell of the snake's body; remembers its previous grid
    position so the part behind it can follow."""

    def __init__(self, i, j, color, cellSize, canvasWidth, canvasHeight):
        super(SnakePart, self).__init__(i, j, 1, 1, cellSize, canvasWidth, canvasHeight)
        self.color = color
        # previous grid position, consumed by the following body part
        self.lasti = i
        self.lastj = j

    def draw(self, canvas):
        _xleft = self._x - 0.5 * self._cellSize
        _ytop = self._y - 0.5 * self._cellSize
        pygame.draw.rect(canvas, self.color, (_xleft, _ytop, self._w, self._h))


class Snake(base.Entity):
    """The player-controlled snake: a chain of SnakeParts whose head
    moves one cell at a time in the current direction, wrapping at the
    canvas edges. Dies on self-collision."""

    def __init__(self, i, j, cellSize, canvasWidth, canvasHeight):
        super(Snake, self).__init__(i, j, 1, 1, cellSize, canvasWidth, canvasHeight)
        self._bodyParts = [SnakePart(i, j, (50, 50, 50), cellSize, canvasWidth, canvasHeight)]
        self._speed = 800.          # displacement accumulated per second
        self._direction = 'left'
        self._displacement = 0.0    # accumulator; one cell consumed per step
        self._frameTime = 0.001     # assumed frame duration (matches Game's sleep)
        self._nx = int(canvasWidth / cellSize)
        self._ny = int(canvasHeight / cellSize)
        self._alive = True

    def alive(self):
        return self._alive

    def head(self):
        return self._bodyParts[0]

    def tail(self):
        return self._bodyParts[-1]

    def setDirection(self, direction):
        """Change heading, but never let a snake longer than one part
        reverse straight into itself."""
        _opposite = {'left': 'right', 'right': 'left', 'up': 'down', 'down': 'up'}
        if len(self._bodyParts) > 1 and direction == _opposite[self._direction]:
            return  # ignore a 180-degree turn
        self._direction = direction

    def grow(self):
        """Append a new part at the tail's previous grid position."""
        _i = self.tail().lasti
        _j = self.tail().lastj
        _newPart = SnakePart(_i, _j, (50, 50, 50), self._cellSize,
                             self._canvasWidth, self._canvasHeight)
        self._bodyParts.append(_newPart)

    def update(self):
        """Advance the displacement accumulator; once a full cell has
        been covered, step the head one cell and make each body part
        follow the one ahead of it."""
        self._displacement = self._displacement + self._speed * self._frameTime
        if self._displacement > self._cellSize:
            self.head().lasti = self.head().i
            self.head().lastj = self.head().j
            # move one cell in the current direction
            if self._direction == 'up':
                self.head().j += 1
            elif self._direction == 'down':
                self.head().j -= 1
            elif self._direction == 'right':
                self.head().i += 1
            elif self._direction == 'left':
                self.head().i -= 1
            # each part steps into the previous position of the part ahead
            for k in range(1, len(self._bodyParts)):
                self._bodyParts[k].lasti = self._bodyParts[k].i
                self._bodyParts[k].lastj = self._bodyParts[k].j
                self._bodyParts[k].i = self._bodyParts[k - 1].lasti
                self._bodyParts[k].j = self._bodyParts[k - 1].lastj
            # reset the accumulator
            self._displacement = 0.0
        # wrap around at the canvas edges. The bounds were hard-coded to
        # 800/600; use the canvas size passed to the constructor so any
        # window size works.
        if self.head()._x > self._canvasWidth and self._direction == 'right':
            self.head().i = 0
        if self.head()._x < 0. and self._direction == 'left':
            self.head().i = self._nx
        if self.head()._y > self._canvasHeight and self._direction == 'down':
            self.head().j = self._ny
        if self.head()._y < 0. and self._direction == 'up':
            self.head().j = 0
        for part in self._bodyParts:
            part.update()
        # self-collision check: head against every non-head part
        for k in range(1, len(self._bodyParts)):
            if self.head().hit(self._bodyParts[k]):
                self._alive = False

    def draw(self, canvas):
        for part in self._bodyParts:
            part.draw(canvas)
/utils.py
import math


def grid2screen(i, j, cellSize, canvasWidth, canvasHeight):
    """Map grid cell (i, j) to the pixel coordinates of its centre.

    The grid origin is the bottom-left of the canvas while screen y
    grows downward, hence the canvasHeight - ... flip."""
    centre_x = cellSize * (i + 0.5)
    centre_y = canvasHeight - cellSize * (j + 0.5)
    return centre_x, centre_y


def screen2grid(x, y, cellSize, canvasWidth, canvasHeight):
    """Inverse of grid2screen: map a pixel (x, y) back to its grid cell."""
    col = math.floor(x / cellSize - 0.5)
    row = math.floor((canvasHeight - y) / cellSize - 0.5)
    return col, row
/world.py
import math
import random

import pygame

from base import Entity
from snake import Snake
from collectables import Apple

# snake speed per level (Snake's own default is 800.)
_LEVEL_SPEEDS = {1: 800., 2: 2100., 3: 2100.}
# number of random obstacles scattered per level
_LEVEL_OBSTACLES = {1: 0, 2: 5, 3: 10}


class Obstacle(Entity):
    """A deadly red cell; the snake dies when its head hits one."""

    def __init__(self, i, j, di, dj, cellSize, canvasWidth, canvasHeight):
        super(Obstacle, self).__init__(i, j, di, dj, cellSize, canvasWidth, canvasHeight)
        self._color = (255, 0, 0)

    def draw(self, canvas):
        _xleft = self._x - 0.5 * self._cellSize
        _ytop = self._y - 0.5 * self._cellSize
        pygame.draw.rect(canvas, self._color, (_xleft, _ytop, self._w, self._h))


class World(object):
    """Game world: snake, apples, obstacles/walls, score and lives.

    Levels advance on score (5 points -> level 2, 10 -> level 3);
    reaching 15 points on level 3 wins the game."""

    def __init__(self, cellSize, canvasWidth, canvasHeight, level=1):
        super(World, self).__init__()
        self._cellSize = cellSize
        self._canvasWidth = canvasWidth
        self._canvasHeight = canvasHeight
        self._level = level
        self._nx = int(self._canvasWidth / self._cellSize)
        self._ny = int(self._canvasHeight / self._cellSize)
        self._maxLives = 4
        self._numLives = 4
        self._gameWin = False
        self._gameOver = False
        self._keys = None
        self._points = 0
        self._font = pygame.font.Font(None, 40)
        self._setupLevel()

    def _setupLevel(self):
        """(Re)create the snake, obstacles, walls and first apple for the
        current level. Shared by __init__ and restart() — the original
        duplicated this whole sequence in both places."""
        self._snake = Snake(int(self._nx / 2.), int(self._ny / 2.),
                            self._cellSize, self._canvasWidth, self._canvasHeight)
        self._snake._speed = _LEVEL_SPEEDS.get(self._level, 800.)
        self._obstacles = []
        self._occupied = []
        self._apples = []
        self._createObstacles()
        self._createWalls()
        for obstacle in self._obstacles:
            self._occupied.append((obstacle.i, obstacle.j))
        self._createApples(1)

    def _createObstacles(self):
        """Scatter the level's random obstacles, never on the snake's
        starting cell in the middle of the grid."""
        _count = _LEVEL_OBSTACLES.get(self._level, 0)
        while len(self._obstacles) < _count:
            # valid grid columns/rows are 0 .. n-1; randint is inclusive,
            # so the original 0..n upper bound could land one cell off-grid
            _i = random.randint(0, self._nx - 1)
            _j = random.randint(0, self._ny - 1)
            if _i == int(self._nx / 2) and _j == int(self._ny / 2):
                continue
            self._obstacles.append(Obstacle(_i, _j, 1, 1, self._cellSize,
                                            self._canvasWidth, self._canvasHeight))

    def _createWalls(self):
        """Build the border walls. Level 1 has none; level 3 leaves a gap
        in the middle of each wall (the level 2/3 loops were duplicates
        differing only in that gap)."""
        if self._level == 1:
            return
        _gap_i = int(self._nx / 2) if self._level == 3 else None
        _gap_j = int(self._ny / 2) if self._level == 3 else None
        for i in range(self._nx):
            if i == _gap_i:
                continue
            self._obstacles.append(Obstacle(i, 0, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight))
            self._obstacles.append(Obstacle(i, self._ny - 1, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight))
        for j in range(self._ny):
            if j == _gap_j:
                continue
            self._obstacles.append(Obstacle(0, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight))
            self._obstacles.append(Obstacle(self._nx - 1, j, 1, 1, self._cellSize, self._canvasWidth, self._canvasHeight))

    def _createApples(self, maxApples=20):
        """Spawn apples on random free cells until at least *maxApples*
        exist."""
        while True:
            _i = random.randint(2, self._nx - 2)
            _j = random.randint(2, self._ny - 2)
            # _occupied holds (i, j) tuples, so a membership test replaces
            # the original element-by-element scan
            if (_i, _j) not in self._occupied:
                self._apples.append(Apple(_i, _j, self._cellSize,
                                          self._canvasWidth, self._canvasHeight))
            if len(self._apples) >= maxApples:
                break

    def setKeys(self, keys):
        # Shared key-state dict injected by the screen each frame.
        self._keys = keys

    def restart(self):
        """Reset the score and rebuild the current level (after losing a
        life or advancing a level)."""
        self._points = 0
        self._setupLevel()

    def _drawGrid(self, canvas):
        """Draw the background cell grid lines."""
        for i in range(self._nx):
            xline = (i + 1) * self._cellSize
            pygame.draw.line(canvas, (0, 0, 0), (xline, 0), (xline, self._canvasHeight), 1)
        for j in range(self._ny):
            yline = (j + 1) * self._cellSize
            pygame.draw.line(canvas, (0, 0, 0), (0, yline), (self._canvasWidth, yline), 1)

    def _drawScore(self, canvas):
        """Render the score/lives banner near the top-left corner."""
        _textSurface = self._font.render('Puntaje: %d - Vidas: %d' % (self._points, self._numLives), True, (0, 0, 255))
        _textSurface.get_rect().center = (30, 30)
        canvas.blit(_textSurface, _textSurface.get_rect())

    def draw(self, canvas):
        self._drawGrid(canvas)
        self._snake.draw(canvas)
        for obstacle in self._obstacles:
            obstacle.draw(canvas)
        for apple in self._apples:
            apple.draw(canvas)
        self._drawScore(canvas)

    def update(self):
        # steer the snake from the shared key-state dict, if provided
        if self._keys:
            if self._keys['up'] == True:
                self._snake.setDirection('up')
            elif self._keys['down'] == True:
                self._snake.setDirection('down')
            elif self._keys['right'] == True:
                self._snake.setDirection('right')
            elif self._keys['left'] == True:
                self._snake.setDirection('left')
        self._snake.update()
        for obstacle in self._obstacles:
            obstacle.update()
            if self._snake.head().hit(obstacle):
                self._snake._alive = False
        if not self._snake.alive():
            self._numLives = self._numLives - 1
            if self._numLives >= 1:
                self.restart()
            else:
                self._gameOver = True
            return
        # index loop on purpose: _createApples appends while we iterate,
        # and range(len(...)) snapshots the original length
        for i in range(len(self._apples)):
            self._apples[i].update()
            if self._snake.head().hit(self._apples[i]):
                self._apples[i]._alive = False
                self._snake.grow()
                self._points = self._points + 1
                self._createApples(1)
                if self._level == 1 and self._points >= 5:
                    self._level = 2
                    self._numLives = 4
                    self._points = 0
                    self.restart()
                elif self._level == 2 and self._points >= 10:
                    self._level = 3
                    self._numLives = 4
                    self._points = 0
                    self.restart()
                elif self._level == 3 and self._points >= 15:
                    self._gameWin = True
                    return
        # drop eaten apples
        self._apples = [apple for apple in self._apples if apple._alive]

    def lose(self):
        return self._gameOver

    def win(self):
        return self._gameWin
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
darshanime/scrapy-tutorials
refs/heads/master
{"/housing/housing/spiders/housing_spider.py": ["/housing/housing/items.py"], "/scrapy 101/scrapy101/spiders/dmoz.py": ["/scrapy 101/scrapy101/items.py"], "/cardekho/cardekho/spiders/cardekho_spider.py": ["/cardekho/cardekho/items.py"]}
└── ├── cardekho │ └── cardekho │ ├── items.py │ └── spiders │ └── cardekho_spider.py ├── housing │ └── housing │ ├── items.py │ └── spiders │ └── housing_spider.py └── scrapy 101 └── scrapy101 ├── items.py └── spiders └── dmoz.py
/cardekho/cardekho/items.py
from scrapy import Item, Field


class CardekhoItem(Item):
    """One used-car listing scraped from cardekho.com."""
    title = Field()     # listing title text
    price = Field()     # asking-price string as shown on the page
    distance = Field()  # odometer reading string
/cardekho/cardekho/spiders/cardekho_spider.py
from cardekho.items import CardekhoItem
from scrapy import Spider
from scrapy.http.request import Request


class CardekhoSpider(Spider):
    """Scrape used-car listings (title, price, odometer) for Mumbai from
    cardekho.com."""
    name = "cardekho"
    # allowed_domains must hold bare domain names, not URLs: Scrapy's
    # OffsiteMiddleware compares hostnames, so the original
    # "http://www.cardekho.com" entry would never match and every
    # request would be filtered as off-site.
    allowed_domains = ["cardekho.com"]
    start_urls = ["http://www.cardekho.com/used-cars+in+mumbai-all/"]
    # This is to not get redirected by CarDekho. We are identifying
    # ourselves as a web-browser.
    custom_settings = {'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}

    def start_requests(self):
        # There are 162 pages; ask Scrapy to fetch all of them.
        for page in range(162):
            yield Request("http://www.cardekho.com/used-cars+in+mumbai-all/" + str(page), self.parse)

    def parse(self, response):
        """Yield one CardekhoItem per listing row on the page."""
        for sel in response.xpath('/html/body/main/div/div[2]/div[2]/div[9]/form/ul/li'):
            item = CardekhoItem()
            item['title'] = sel.xpath('div[1]/div[2]/div[1]/a/text()').extract()
            item['price'] = sel.xpath('div[1]/div[3]/div[1]/text()').extract()
            item['distance'] = sel.xpath('div[1]/div[2]/div[3]/ul/li[1]/div[2]/span/text()').extract()
            yield item
/housing/housing/items.py
from scrapy import Item, Field


class HousingItemBuy(Item):
    """One 'buy' listing from housing.com's JSON search API.

    Field names mirror the keys pulled out of the API response in
    housing_spider.py."""
    ad_id = Field()
    ad_title = Field()
    ad_price = Field()                  # formatted price string
    ad_area = Field()
    ad_url = Field()                    # canonical listing URL
    ad_date_added = Field()
    ad_coordinates = Field()            # location coordinates
    ad_bedrooms = Field()
    ad_toilets = Field()
    ad_gas_pipeline = Field()           # amenity flags
    ad_lift = Field()
    ad_parking = Field()
    ad_gym = Field()
    ad_swimming_pool = Field()
    ad_city = Field()
    ad_locality = Field()
    ad_contact_persons_name = Field()
    ad_contact_persons_number = Field()
    ad_contact_persons_id = Field()
    count = Field()                     # unused by the spider as written
/housing/housing/spiders/housing_spider.py
from housing.items import HousingItemBuy
from scrapy import Spider
from scrapy.http.request import Request
# To parse the JSON received
import json


class HousingSpider(Spider):
    """Scrape 'buy' property listings from housing.com's JSON search API."""
    name = "housing"
    allowed_domains = ["housing.com"]
    custom_settings = {'USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}

    def start_requests(self):
        # We have 1080 pages to fetch
        for count in range(1, 1081):
            # print() form works on both Python 2 and 3 (the original used
            # the py2-only print statement)
            print("Getting page : %s" % count)
            yield Request("https://buy.housing.com/api/v1/buy/index/filter?poly=f97f947ffae6408ac295&results_per_page=30&p=" + str(count) + "&resale_total_count=30045&np_total_count=2329", self.parse_buy)

    def parse_buy(self, response):
        """Parse one API page (30 hits) into HousingItemBuy items."""
        # Since the response is purely JSON
        text = response.body
        # Parsing it using the builtin json utility
        parsed_json = json.loads(text)
        # The parsed JSON reads as a dict; for each hit store everything
        # defined in items.py.
        # (loop index renamed from 'iter', which shadowed the builtin)
        for idx in range(30):
            hit = parsed_json["hits"][idx]
            item = HousingItemBuy()
            item['ad_price'] = hit["formatted_price"]
            item['ad_url'] = hit["inventory_canonical_url"]
            item['ad_title'] = hit["title"]
            item['ad_coordinates'] = hit["location_coordinates"]
            item['ad_date_added'] = hit["date_added"]
            item['ad_area'] = hit["inventory_configs"][0]["area"]
            item['ad_bedrooms'] = hit["inventory_configs"][0]["number_of_bedrooms"]
            item['ad_toilets'] = hit["inventory_configs"][0]["number_of_toilets"]
            item['ad_contact_persons_number'] = hit["contact_persons_info"][0]["contact_no"]
            item['ad_contact_persons_id'] = hit["contact_persons_info"][0]["profile_id"]
            item['ad_contact_persons_name'] = hit["contact_persons_info"][0]["name"]
            # Some entries do not have the ad_city/ad_locality variable;
            # catch Exception specifically instead of the original bare
            # except, which also swallowed KeyboardInterrupt/SystemExit.
            try:
                item['ad_city'] = hit["display_city"][0]
            except Exception:
                item['ad_city'] = "None given"
            try:
                item['ad_locality'] = hit["display_city"][1]
            except Exception:
                item['ad_locality'] = "None given"
            amenities = hit["inventory_amenities"]
            item['ad_gas_pipeline'] = amenities["has_gas_pipeline"]
            item['ad_lift'] = amenities["has_lift"]
            item['ad_parking'] = amenities["has_parking"]
            item['ad_gym'] = amenities["has_gym"]
            item['ad_swimming_pool'] = amenities["has_swimming_pool"]
            item['ad_id'] = hit["id"]
            yield item
/scrapy 101/scrapy101/items.py
from scrapy import Item, Field


class Scrapy101Item(Item):
    """Tutorial item: just a page/category title."""
    title = Field()
/scrapy 101/scrapy101/spiders/dmoz.py
from scrapy.spiders import BaseSpider
from scrapy101.items import Scrapy101Item


class Scrapy101Spider(BaseSpider):
    """Minimal tutorial spider: print category titles from dmoz.org."""
    name = "dmoz"
    # allowed_domains entries are bare domain names; the original
    # "dmoz.org/" (trailing slash) never matches a hostname, so every
    # follow-up request would be dropped by OffsiteMiddleware.
    allowed_domains = ["dmoz.org"]
    start_urls = ["http://www.dmoz.org/"]

    def parse(self, response):
        """Walk the category divs and print each link's title."""
        for div in response.xpath('/html/body/div[3]/div[3]/div[1]/div'):
            for entry in div.xpath('span'):
                item = Scrapy101Item()
                item['title'] = entry.xpath('a/text()').extract()
                # print() form works on both Python 2 and 3
                print(item['title'])
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
gausszh/sae_site
refs/heads/master
{"/views/blog.py": ["/utils/blog_cache.py", "/models/blog.py"], "/utils/__init__.py": ["/models/base.py"], "/utils/user_cache.py": ["/models/base.py", "/models/blog.py", "/utils/__init__.py"], "/views/base.py": ["/models/base.py"], "/utils/blog_cache.py": ["/utils/__init__.py"]}
└── ├── configs │ └── settings_dev.py ├── flask_app.py ├── models │ ├── __init__.py │ ├── base.py │ └── blog.py ├── utils │ ├── __init__.py │ ├── blog_cache.py │ ├── filters.py │ └── user_cache.py └── views ├── base.py ├── blog.py └── security.py
/configs/settings_dev.py
#coding=utf8
"""Development settings for the SAE blog app."""
import os

# system setting
DEBUG = True
APP_HOST = '127.0.0.1'
APP_PORT = 7020
STORAGE_BUCKET_DOMAIN_NAME = 'blogimg'

# database
if os.environ.get('SERVER_SOFTWARE'):  # running on SAE (production)
    import sae
    DB_SAE_URI = 'mysql://%s:%s@%s:%s/database_name' % (sae.const.MYSQL_USER,
                                                        sae.const.MYSQL_PASS,
                                                        sae.const.MYSQL_HOST,
                                                        sae.const.MYSQL_PORT)
    DB_POOL_RECYCLE_TIMEOUT = 10
    DB_ECHO = True
else:
    DB_SAE_URI = 'mysql://user:pass@127.0.0.1:3306/database_name'
    # DB_SAE_URI = 'sqlite:////database.db'
    DB_POOL_RECYCLE_TIMEOUT = 10
    DB_ECHO = True

# cache
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
REDIS_DB = 1
CACHE_TIMEOUT = 3
# utils/blog_cache.py reads DRAFT_BLOG_TIMEOUT when saving article
# drafts; it was missing here, so saving a draft raised AttributeError
# in development. One hour. (Confirm against the production settings
# module if one exists.)
DRAFT_BLOG_TIMEOUT = 3600

# app
API_KEY = '***'
API_SECRET = '****'
REDIRECT_URI = 'http://****'
/flask_app.py
#!/usr/bin/python # coding=utf8 from flask import Flask, render_template, g import flask_login from configs import settings from utils.filters import JINJA2_FILTERS from utils import user_cache from views import blog, base, security def create_app(debug=settings.DEBUG): app = Flask(__name__) app.register_blueprint(blog.bp_blog) app.register_blueprint(base.bp_base) app.register_blueprint(security.bp_security) app.jinja_env.filters.update(JINJA2_FILTERS) app.debug = debug app.secret_key = "gausszh" @app.route('/') def index(): return render_template('index.html') @app.before_request def check_user(): g.user = flask_login.current_user login_manager = flask_login.LoginManager() login_manager.setup_app(app) @login_manager.user_loader def load_user(userid): user = user_cache.get_user(userid, format='object') return user login_manager.unauthorized = blog.list # login_manager.anonymous_user = AnonymousUserMixin return app app = create_app(settings.DEBUG) if __name__ == '__main__': host = settings.APP_HOST port = settings.APP_PORT app.run(host=host, port=port)
/models/__init__.py
#coding=utf-8 from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from configs import settings sae_engine = create_engine(settings.DB_SAE_URI+'?charset=utf8', encoding='utf-8', convert_unicode=True, pool_recycle=settings.DB_POOL_RECYCLE_TIMEOUT, echo=settings.DB_ECHO) create_session = sessionmaker(autocommit=False, autoflush=False, bind=sae_engine) Base = declarative_base()
/models/base.py
#coding=utf8 """ 基础类--用户信息 """ from sqlalchemy import ( MetaData, Table, Column, Integer, BigInteger, Float, String, Text, DateTime, ForeignKey, Date, UniqueConstraint) from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base from models import sae_engine from models import create_session Base = declarative_base() metadata = MetaData() class User(Base): """ 发布历史日志 """ __tablename__ = 'user' __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} id = Column(Integer, primary_key=True) open_id = Column(String(45), nullable=False, index=True) token = Column(String(64), nullable=False, index=True) name = Column(String(45)) email = Column(String(60)) address = Column(String(150)) tel = Column(String(15)) school = Column(String(45)) create_time = Column(DateTime) def is_authenticated(self): return True def is_active(self): return True def is_anonymous(self): return False def get_id(self): return unicode(self.id) def __repr__(self): return '<User %r>' % (self.name) if __name__ == '__main__': Base.metadata.create_all(bind=sae_engine)
/models/blog.py
#!/usr/bin/python #coding=utf8 import datetime from sqlalchemy import ( MetaData, Table, Column, Integer, BigInteger, Float, String, Text, DateTime, ForeignKey, Date, UniqueConstraint) from sqlalchemy.orm import relationship from sqlalchemy.ext.declarative import declarative_base from models import sae_engine from models import create_session Base = declarative_base() metadata = MetaData() class BlogArticle(Base): """ 发布历史日志 """ __tablename__ = 'blog_article' __table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'} id = Column(Integer, primary_key=True) title = Column(String(50)) markdown = Column(Text) html = Column(Text) create_by = Column(Integer, index=True, nullable=False) create_time = Column(DateTime, nullable=False) update_time = Column(DateTime, index=True, nullable=False,) is_active = Column(Integer, nullable=False, default=1) if __name__ == '__main__': Base.metadata.create_all(bind=sae_engine)
/utils/__init__.py
#coding=utf8
import datetime
import redis
import flask_login
from models.base import User, create_session
from utils import user_cache
from configs import settings


def AnonymousUserMixin():
    '''
    Create, persist and log in a fresh "guest" user, then return it.

    Guests are named sequentially ('anonymousN') from a redis-backed
    counter; used as the fallback user for unauthenticated visitors.
    '''
    session = create_session()
    user = User()
    count = user_cache.get_anonymous_count()
    anonymouser_id = 1000 + count
    user.open_id = 'anonymous%s' % anonymouser_id
    user.name = u'游客%s' % anonymouser_id
    user.token = ''
    user.create_time = datetime.datetime.now()
    session.add(user)
    session.commit()
    user_cache.incr_anonymous_count()
    flask_login.login_user(user, remember=True)
    session.close()
    return user


# The settings module defines REDIS_HOST (there is no REDIS_IP), so the
# original settings.REDIS_IP lookup raised AttributeError at import time.
redis_pool = redis.ConnectionPool(host=settings.REDIS_HOST,
                                  port=settings.REDIS_PORT,
                                  db=settings.REDIS_DB)


def redis_connection():
    """Return a Redis client backed by the module-level connection pool."""
    return redis.Redis(connection_pool=redis_pool)
/utils/blog_cache.py
# coding=utf8 from configs import settings from utils import redis_connection APP = "blog" def set_draft_blog(uid, markdown): _cache = redis_connection() key = str("%s:draft:blog:%s" % (APP, uid)) _cache.set(key, markdown, settings.DRAFT_BLOG_TIMEOUT)
/utils/filters.py
# coding=utf8 """ jinja2的过滤器 """ import markdown def md2html(md): """ @param {unicode} md @return {unicode html} """ return markdown.markdown(md, ['extra', 'codehilite', 'toc', 'nl2br'], safe_mode="escape") JINJA2_FILTERS = { 'md2html': md2html, }
/utils/user_cache.py
# coding=utf8
# Redis-backed caches for user and blog records, plus a helper that turns
# SQLAlchemy ORM instances into JSON-serializable dicts.
try:
    import simplejson as json
except Exception:
    import json
import datetime
from sqlalchemy.sql import or_
from models.base import create_session, User
from models.blog import BlogArticle
from configs import settings
from utils import redis_connection
# import sae.kvdb

# redis key namespace for this module
APP = "base"


def get_user(uid, format="json"):
    """Fetch a user by primary key or open_id through the redis cache.

    Returns a dict by default, or a detached User object when
    format='object'; returns None when no user matches.
    """
    _cache = redis_connection()
    key = str("%s:user:%s" % (APP, uid))
    userinfo = _cache.get(key)
    new = False
    if not userinfo:
        # cache miss: load from the database and repopulate the cache
        session = create_session()
        userinfo = session.query(User).filter(or_(User.id == uid,
                                                  User.open_id == uid)).first()
        userinfo = orm2json(userinfo)
        _cache.set(key, json.dumps(userinfo), settings.CACHE_TIMEOUT)
        new = True
        session.close()
    if not new:
        # cache hit: the cached value is a JSON string
        userinfo = json.loads(userinfo)
    if format == 'object' and userinfo:
        # rebuild a bare User from the cached dict (not session-attached)
        user = User()
        for k in userinfo:
            setattr(user, k, userinfo.get(k))
        userinfo = user
    return userinfo or None


def delete_user(uid):
    """Invalidate the cached entry; the next get_user re-reads the DB."""
    _cache = redis_connection()
    key = str("%s:user:%s" % (APP, uid))
    _cache.delete(key)


def get_anonymous_count():
    """Number of anonymous ("guest") users, cached in redis and counted
    from the database on a cache miss."""
    _cache = redis_connection()
    key = "%s:anonymous:count" % APP
    count = _cache.get(key)
    if not count:
        session = create_session()
        count = session.query(User).filter(
            User.open_id.startswith("anonymous")).count()
        _cache.set(key, count, settings.CACHE_TIMEOUT)
        session.close()
    return int(count)


def incr_anonymous_count():
    # NOTE(review): read-then-set is not atomic -- two concurrent calls
    # can lose an increment; redis INCR would be atomic.
    _cache = redis_connection()
    key = "%s:anonymous:count" % APP
    count = get_anonymous_count()
    _cache.set(key, count + 1, settings.CACHE_TIMEOUT)


def get_blog(blog_id):
    """
    Fetch one blog article (as a dict) through the redis cache.
    (Original docstring: 获取博客的数据)
    """
    _cache = redis_connection()
    key = str("%s:blog:%s" % (APP, blog_id))
    bloginfo = _cache.get(key)
    new = False
    if not bloginfo:
        # cache miss: load from the database and repopulate the cache
        session = create_session()
        bloginfo = session.query(BlogArticle).filter_by(id=blog_id).first()
        bloginfo = orm2json(bloginfo)
        _cache.set(key, json.dumps(bloginfo), settings.CACHE_TIMEOUT)
        new = True
        session.close()
    if not new:
        bloginfo = json.loads(bloginfo)
    return bloginfo


def delete_blog(blog_id):
    """Invalidate the cached entry for one blog article."""
    _cache = redis_connection()
    key = str("%s:blog:%s" % (APP, blog_id))
    _cache.delete(key)


def orm2json(orm):
    """
    Convert a SQLAlchemy result (one instance or a list) into
    JSON-serializable dict(s): private attributes are skipped and
    datetimes become '%Y-%m-%d %H:%M:%S' strings.
    (Original docstring: 将sqlalchemy返回的对象转换为可序列话json类型的对象)
    """
    def single2py(instance):
        d = {}
        if instance:
            keys = instance.__dict__.keys()
            for key in keys:
                if key.startswith('_'):
                    continue
                value = getattr(instance, key)
                d[key] = isinstance(value, datetime.datetime) and \
                    value.strftime('%Y-%m-%d %H:%M:%S') or value
        return d
    if isinstance(orm, list):
        return [single2py(ins) for ins in orm]
    return single2py(orm)
/views/base.py
#coding=utf8 import datetime from flask import Blueprint, request, jsonify, render_template, redirect import flask_login import weibo as sinaweibo from models.base import create_session, User from utils import user_cache from configs import settings bp_base = Blueprint('base', __name__, url_prefix='/base') @bp_base.route('/weibo/login/') def weibo_login(): api = sinaweibo.Client(settings.API_KEY,settings.API_SECRET,settings.REDIRECT_URI) code = request.args.get('code') try: api.set_code(code) except Exception, e: return redirect('/blog/') sinainfo = api.token user = user_cache.get_user(sinainfo.get('uid'), format='object') if user: flask_login.login_user(user, remember=True) else: user = User() user.open_id = sinainfo.get('uid') user.token = sinainfo.get('access_token') userinfo = api.get('users/show', uid=sinainfo.get('uid')) user.name = userinfo.get('name') user.address = userinfo.get('location') user.create_time = datetime.datetime.now() session = create_session() session.add(user) session.commit() flask_login.login_user(user, remember=True) session.close() return redirect('/blog/') @bp_base.route('/logout/') def logout(): flask_login.logout_user() return redirect('/blog/')
/views/blog.py
# coding=utf8
"""Blog views: list, edit, soft-delete, draft saving and image upload."""
import datetime
import urllib
from flask import Blueprint, request, jsonify, render_template, g
import flask_login
from sae.storage import Bucket
from models.blog import create_session, BlogArticle
from utils.blog_cache import set_draft_blog
from configs import settings

bp_blog = Blueprint('blog', __name__, url_prefix='/blog')

# Module-level SAE storage bucket; put() ensures the bucket exists.
bucket = Bucket(settings.STORAGE_BUCKET_DOMAIN_NAME)
bucket.put()


@bp_blog.route('/')
@bp_blog.route('/list/')
def list():
    # NOTE(review): shadows the builtin ``list``; name kept -- it is the
    # registered endpoint name.
    session = create_session()
    blogs = session.query(BlogArticle).order_by(BlogArticle.update_time.desc())\
        .all()
    session.close()
    return render_template('blog/blog_list.html', blogs=blogs)


@bp_blog.route('/delete/<int:blog_id>/', methods=['POST'])
@flask_login.login_required
def delete(blog_id):
    """Soft-delete an article (is_active=0); only the owner may delete."""
    session = create_session()
    blog = session.query(BlogArticle).filter_by(id=blog_id).first()
    if blog.create_by == g.user.id:
        blog.is_active = 0
        session.commit()
        session.close()
        return jsonify(ok=True, data={'blog_id': blog_id})
    session.close()
    # u'数据错误' == "data error"
    return jsonify(ok=False, reason=u'数据错误')


@bp_blog.route('/draft/', methods=['POST'])
@flask_login.login_required
def draft():
    """Save an unpublished article as a draft for the current user."""
    form = request.form
    markdown = form.get('markdown', '')
    set_draft_blog(flask_login.current_user.id, markdown)
    return jsonify(ok=True)


@bp_blog.route('/edit/<int:blog_id>/', methods=['GET', 'POST'])
@bp_blog.route('/edit/', methods=['GET', 'POST'])
@flask_login.login_required
def edit(blog_id=0):
    """GET: render the edit form; POST: create or update an article."""
    if request.method == 'GET':
        if blog_id == 0:
            blog = None
        else:
            session = create_session()
            blog = session.query(BlogArticle).filter_by(id=blog_id).first()
            session.close()
        return render_template('blog/blog_edit.html', blog=blog)
    if request.method == 'POST':
        form = request.form
        markdown = form.get('markdown')
        title = form.get('title')
        blog_id = form.get('blog_id')
        if markdown and title and (len(markdown.strip()) * len(title.strip()) > 0):
            session = create_session()
            now = datetime.datetime.now()
            # blog_id belong to this user
            if blog_id:
                blog = session.query(BlogArticle).filter_by(id=blog_id).first()
            if not blog_id or not blog:
                # New article: stamp creator and creation time.
                blog = BlogArticle()
                blog.create_by = flask_login.current_user.id
                blog.create_time = now
                blog.is_active = 1
            blog.update_time = now
            blog.title = title
            blog.markdown = markdown
            session.add(blog)
            session.commit()
            blog_id = blog.id
            session.close()
            return jsonify(ok=True, data={'blog_id': blog_id})
        # u'数据错误' == "data error"
        return jsonify(ok=False, reason=u'数据错误')


@bp_blog.route('/view/<int:blog_id>/')
def view_blog(blog_id):
    """Render one article; anonymous visitors only see active articles."""
    session = create_session()
    query = session.query(BlogArticle).filter_by(id=blog_id)
    if not flask_login.current_user.is_active():
        query = query.filter_by(is_active=1)
    blog = query.first()
    session.close()
    return render_template('blog/blog_view.html', blog=blog)


@bp_blog.route('/files/', methods=['POST'])
@flask_login.login_required
def save_file():
    """Store uploaded images (and images fetched from URLs) in the bucket.

    Returns a JSON list of {name, link} for each stored file.
    """
    files_name = request.files.keys()
    ret = []
    for fn in files_name:
        # No security validation yet (e.g. content check via PIL).
        img_file = request.files.get(fn)
        bucket.put_object(fn, img_file)
        link = bucket.generate_url(fn)
        ret.append({'name': fn, 'link': link})
    http_files_link = request.form.keys()
    for fn in http_files_link:
        # Form values are remote image URLs; fetch and re-host them.
        http_link = request.form.get(fn)
        img_file = urllib.urlopen(http_link)
        bucket.put_object(fn, img_file)
        link = bucket.generate_url(fn)
        ret.append({'name': fn, 'link': link})
    return jsonify(ok=True, data=ret)
/views/security.py
# coding=utf8 """ 学web安全用到的一些页面 """ from flask import Blueprint, render_template from sae.storage import Bucket from configs import settings bp_security = Blueprint('security', __name__, url_prefix='/security') bucket = Bucket(settings.STORAGE_BUCKET_DOMAIN_NAME) bucket.put() @bp_security.route('/wanbo/video/') def wanbo_video(): return render_template('security/wanbo_video.html')
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
liuchao012/myPythonWeb
refs/heads/master
{"/functional_tests/test_simple_list_creation.py": ["/functional_tests/base.py"], "/functional_tests/tests_layout_and_styling.py": ["/functional_tests/base.py"], "/functional_tests/tests_list_item_validation.py": ["/functional_tests/base.py"], "/test/listsss/tests_models.py": ["/listsss/views.py"], "/test/listsss/tests_views.py": ["/listsss/views.py"]}
└── ├── functional_tests │ ├── __init__.py │ ├── base.py │ ├── test_simple_list_creation.py │ ├── tests_layout_and_styling.py │ └── tests_list_item_validation.py ├── listsss │ ├── apps.py │ └── views.py └── test └── listsss ├── tests_models.py └── tests_views.py
/functional_tests/__init__.py
# -*- coding: utf-8 -*- # @Time : 2018/6/28 17:06 # @Author : Mat # @Email : mat_wu@163.com # @File : __init__.py.py # @Software: PyCharm ''' functional_tests,中的文件需要已tests开头系统命令才能读取到测试用例并执行测试 测试执行命令python manage.py test functional_tests,来完成功能测试 如果执行 python manage.py test 那么django 将会执行 功能测试和单元测试 如果想只运行单元测试则需要执行固定的app ,python manage.py test listsss '''
/functional_tests/base.py
# -*- coding: utf-8 -*- # @Time : 2018/6/25 20:15 # @Author : Mat # @Email : mat_wu@163.com # @File : functional_tests1.py # @Software: PyCharm from selenium import webdriver from selenium.webdriver.common.keys import Keys from django.test import LiveServerTestCase from django.contrib.staticfiles.testing import StaticLiveServerTestCase import unittest from unittest import skip class FunctionalTest(StaticLiveServerTestCase): #不知道为什么加上下面两个方法之后就报错了 # @classmethod # def setUpClass(cls): # pass # # @classmethod # def tearDownClass(cls): # pass def setUp(self): self.driver = webdriver.Firefox() self.driver.implicitly_wait(3) def tearDown(self): self.driver.quit() def check_for_row_in_list_table(self, row_text): table = self.driver.find_element_by_id('id_list_table') rows = table.find_elements_by_tag_name('tr') self.assertIn(row_text, [row.text for row in rows])
/functional_tests/test_simple_list_creation.py
# -*- coding: utf-8 -*-
# @Time    : 2018/6/25 20:15
# @Author  : Mat
# @Email   : mat_wu@163.com
# @File    : functional_tests1.py
# @Software: PyCharm
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
import unittest
from unittest import skip
from .base import FunctionalTest


class NewVisitorTest(FunctionalTest):
    """User story: Edith creates a list; Francis later gets his own list
    at a different URL and never sees Edith's items."""

    def test_can_start_a_list_and_retrieve_it_later(self):
        # LiveServerTestCase provides self.live_server_url instead of the
        # deployed localhost address.
        # self.driver.get("http://localhost:8000")
        self.driver.get(self.live_server_url)
        # The page shows "To-Do" in the title and in the header.
        self.assertIn('To-Do', self.driver.title)
        header_text = self.driver.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # The app invites her to enter a to-do item.
        inputbox = self.driver.find_element_by_id('id_new_item')
        self.assertEqual(inputbox.get_attribute('placeholder'), 'Enter a to-do item')
        # She types "Buy peacock feathers" and presses Enter; the page
        # updates and lists "1:Buy peacock feathers".
        inputbox.send_keys('Buy peacock feathers')
        inputbox.send_keys(Keys.ENTER)
        edith_list_url = self.driver.current_url
        self.assertRegex(edith_list_url, '/list/.+?')
        table = self.driver.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        # A second item can be added through the same input box.
        inputbox = self.driver.find_element_by_id('id_new_item')
        inputbox.send_keys('Use peacock feathers to make a fly')
        inputbox.send_keys(Keys.ENTER)
        table = self.driver.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn('1:Buy peacock feathers', [row.text for row in rows])
        self.assertIn('2:Use peacock feathers to make a fly', [row.text for row in rows])
        # Start a brand-new browser session so cookies do not leak between
        # users and one user's list stays invisible to the next.
        self.driver.quit()
        self.driver = webdriver.Firefox()
        self.driver.get(self.live_server_url)
        # Francis sees none of Edith's items.
        page_text = self.driver.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertNotIn('make a fly', page_text)
        # He enters a new item, creating his own list.
        inputbox = self.driver.find_element_by_id('id_new_item')
        inputbox.send_keys('Buy milk')
        inputbox.send_keys(Keys.ENTER)
        # He gets his own URL.
        francis_list_url = self.driver.current_url
        # BUGFIX: the original re-asserted edith_list_url here; it is the
        # freshly obtained francis_list_url that must match the pattern.
        self.assertRegex(francis_list_url, '/list/.+?')
        # assertNotEqual: assertNotEquals is a deprecated alias.
        self.assertNotEqual(francis_list_url, edith_list_url)
        # Still no trace of Edith's list, but his own item is shown.
        page_text = self.driver.find_element_by_tag_name('body').text
        self.assertNotIn('Buy peacock feathers', page_text)
        self.assertIn('Buy milk', page_text)
        # self.fail('Finisth the test')
/functional_tests/tests_layout_and_styling.py
# -*- coding: utf-8 -*- # @Time : 2018/6/25 20:15 # @Author : Mat # @Email : mat_wu@163.com # @File : functional_tests1.py # @Software: PyCharm from selenium import webdriver from selenium.webdriver.common.keys import Keys from django.test import LiveServerTestCase from django.contrib.staticfiles.testing import StaticLiveServerTestCase import unittest from unittest import skip from .base import FunctionalTest class LayoutAndStylingTest(FunctionalTest): def test_layout_and_styling(self): self.driver.get(self.live_server_url) self.driver.set_window_size(1024, 768) # 查看页面元素居中 inputbox = self.driver.find_element_by_id('id_new_item') self.assertAlmostEqual(inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10) # 保存成功后,清单列表的输入框也居中 inputbox.send_keys('testing') inputbox.send_keys(Keys.ENTER) inputbox = self.driver.find_element_by_id('id_new_item') self.assertAlmostEqual(inputbox.location['x'] + inputbox.size['width'] / 2, 512, delta=10)
/functional_tests/tests_list_item_validation.py
# -*- coding: utf-8 -*- # @Time : 2018/6/25 20:15 # @Author : Mat # @Email : mat_wu@163.com # @File : functional_tests1.py # @Software: PyCharm from selenium import webdriver from selenium.webdriver.common.keys import Keys from django.test import LiveServerTestCase from django.contrib.staticfiles.testing import StaticLiveServerTestCase import unittest from unittest import skip from .base import FunctionalTest class ItemValidationTest(FunctionalTest): def test_cannot_add_empty_list_items(self): self.driver.get(self.live_server_url) self.driver.find_element_by_id('id_new_item').send_keys('\n') error = self.driver.find_element_by_css_selector('.has-error') self.assertEqual(error.text, "You cant have an empty list item") self.driver.find_element_by_id('id_new_item').send_keys('Buy milk\n') self.check_for_row_in_list_table('1:Buy milk') self.driver.find_element_by_id('id_new_item').send_keys('\n') self.check_for_row_in_list_table('1:Buy milk') error = self.driver.find_element_by_css_selector('.has-error') self.assertEqual(error.text, "You cant have an empty list item") self.driver.find_element_by_id('id_new_item').send_keys('Buy tea\n') self.check_for_row_in_list_table('1:Buy milk') self.check_for_row_in_list_table('2:Buy tea') self.fail("write me!")
/listsss/apps.py
from django.apps import AppConfig class ListsssConfig(AppConfig): name = 'listsss'
/listsss/views.py
from django.shortcuts import render, redirect  # redirect is Django's redirect helper
from django.http import HttpResponse
from listsss.models import Item, List
from django.core.exceptions import ValidationError

# Create your views here.


def home_page(request):
    """Render the home page; list creation itself is handled by new_list()."""
    return render(request, 'listsss/home.html')


def view_list(request, list_id):
    """Show one list; on POST, validate and append a new item to it.

    FIX: the item is validated with full_clean() BEFORE it is saved. The
    original used Item.objects.create(), which writes the row first and
    then had to delete it again when validation failed (the author's own
    comment noted the empty item still reached the database).
    """
    error = None
    list_ = List.objects.get(id=list_id)
    if request.method == 'POST':
        # Construct without saving so an invalid item never hits the DB.
        item = Item(text=request.POST['item_text'], list=list_)
        try:
            item.full_clean()  # raises ValidationError for blank text
            item.save()
            # redirect(model_instance) uses List.get_absolute_url()
            return redirect(list_)
        except ValidationError:
            error = 'You cant have an empty list item'
    return render(request, 'listsss/list.html', {'list': list_, 'error': error})


def new_list(request):
    """Create a new list from the home-page form and add its first item."""
    list_ = List.objects.create()
    # Construct without saving; validate first (see view_list()).
    item = Item(text=request.POST['item_text'], list=list_)
    try:
        item.full_clean()
        item.save()
    except ValidationError:
        # Roll back the freshly created, now-empty list; the item itself
        # was never saved, so nothing else needs deleting.
        list_.delete()
        error = 'You cant have an empty list item'
        return render(request, 'listsss/home.html', {"error": error})
    # Named-URL redirect instead of the old hard-coded '/list/%d/' path.
    return redirect('view_list', list_.id)


def add_item(request, list_id):
    """Legacy endpoint: append an item to an existing list (no validation)."""
    list_ = List.objects.get(id=list_id)
    Item.objects.create(text=request.POST['item_text'], list=list_)
    return redirect('/list/%d/' % (list_.id,))


class home_page_class():
    # NOTE(review): unused placeholder; kept so existing imports still work.
    pass
/test/listsss/tests_models.py
from django.test import TestCase from django.urls import resolve from django.http import HttpRequest from django.template.loader import render_to_string from listsss.models import Item, List from listsss.views import home_page import unittest from django.core.exceptions import ValidationError class ListAndItemModelsTest(TestCase): def test_saving_and_retrieving_items(self): list_ = List() list_.save() first_item = Item() first_item.text = 'The first (ever) list item' first_item.list = list_ first_item.save() second_item = Item() second_item.text = 'Item the second' second_item.list = list_ second_item.save() saved_liat = List.objects.first() self.assertEqual(saved_liat, list_) saved_items = Item.objects.all() self.assertEqual(saved_items.count(), 2) first_save_item = saved_items[0] second_save_item = saved_items[1] self.assertEqual(first_save_item.text, 'The first (ever) list item') self.assertEqual(first_save_item.list, list_) self.assertEqual(second_save_item.text, 'Item the second') self.assertEqual(second_save_item.list, list_) def test_cannot_save_empty_list_items(self): list_=List.objects.create() item = Item(list= list_, text='') with self.assertRaises(ValidationError): item.save() item.full_clean() def test_get_absolute_url(self): list_ = List.objects.create() self.assertEqual(list_.get_absolute_url(), '/list/%d/'%(list_.id,))
/test/listsss/tests_views.py
from django.test import TestCase
from django.urls import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils.html import escape
from listsss.models import Item, List
from listsss.views import home_page
import unittest

# Create your tests here.


class HomePageTest(TestCase):
    """Unit tests for the home page view and URL routing."""

    def test_root_url_resolves_to_home_page_view(self):
        print("第x个测试通过了")
        found = resolve('/')
        self.assertEqual(found.func, home_page)

    def test_home_page_return_correct_html(self):
        request = HttpRequest()
        resp = home_page(request)
        # Compare against Django's render_to_string output.
        # Original note (translated): the CSRF token differs between two
        # renders, so the exact-equality assertion is commented out.
        expected_html = render_to_string('listsss/home.html', request=request)
        # .decode() converts the response bytes to unicode
        # self.assertEqual(resp.content.decode(), expected_html)
        # self.assertTrue(resp.content.startswith(b'<html>'))
        self.assertIn(b"<title>To-Do lists</title>", resp.content)
        self.assertTrue(resp.content.endswith(b'</html>'))

    # def test_home_page_only_saves_items_when_necessary(self):
    #     request = HttpRequest()
    #     home_page(request)
    #     self.assertEqual(Item.objects.count(), 0)

    # Dropped midway through the book:
    # def test_home_page_displays_all_list_items(self):
    #     Item.objects.create(text='itemey 1')
    #     Item.objects.create(text='itemey 2')
    #
    #     req = HttpRequest()
    #     rep = home_page(req)
    #
    #     self.assertIn('itemey 1', rep.content.decode())
    #     self.assertIn('itemey 2', rep.content.decode())


class ListViewTest(TestCase):
    """Tests for the per-list view (/list/<id>/)."""

    # def test_home_page_displays_all_list_items(self):
    def test_home_page_displays_only_items_for_that_list(self):
        # list_ = List.objects.create()
        # Item.objects.create(text='itemey 1', list=list_)
        # Item.objects.create(text='itemey 2', list=list_)
        correct_list = List.objects.create()
        Item.objects.create(text='itemey 1', list=correct_list)
        Item.objects.create(text='itemey 2', list=correct_list)
        other_list = List.objects.create()
        Item.objects.create(text='other itemey 1', list=other_list)
        Item.objects.create(text='other itemey 2', list=other_list)
        # resp = self.client.get('/list/the-only-list-in-the-world/')
        resp = self.client.get('/list/%d/' % (correct_list.id,))
        self.assertContains(resp, 'itemey 1')
        self.assertContains(resp, 'itemey 2')
        self.assertNotContains(resp, 'other itemey 1')
        self.assertNotContains(resp, 'other itemey 2')

    def test_uses_list_template(self):
        # resp = self.client.get('/list/the-only-list-in-the-world/')
        list_ = List.objects.create()
        resp = self.client.get('/list/%d/' % (list_.id,))
        self.assertTemplateUsed(resp, 'listsss/list.html')

    def test_passes_correct_list_to_template(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        resp = self.client.get('/list/%d/' % (correct_list.id,))
        self.assertEqual(resp.context['list'], correct_list)

    def test_can_save_a_POST_to_an_existing_list(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        self.client.post('/list/%d/' % (correct_list.id,), data={'item_text': 'A new item for an existiong list'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new item for an existiong list')
        self.assertEqual(new_item.list, correct_list)

    def test_POST_redirects_to_list_view(self):
        other_list = List.objects.create()
        correct_list = List.objects.create()
        resp = self.client.post('/list/%d/' % (correct_list.id,), data={'item_text': 'A new item for an existiong list'})
        self.assertRedirects(resp, '/list/%d/' % (correct_list.id,))

    def test_validation_errors_end_up_on_lists_page(self):
        list_ = List.objects.create()
        resp = self.client.post('/list/%d/' % (list_.id,), data={"item_text": ''})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'listsss/list.html')
        # escape() because the template HTML-escapes the apostrophe-free text
        ex_error = escape('You cant have an empty list item')
        self.assertContains(resp, ex_error)


class NewListTest(TestCase):
    """Tests for list creation via POST to /list/new."""

    def test_saving_a_POST_request(self):
        self.client.post('/list/new', data={'item_text': 'A new list item'})
        self.assertEqual(Item.objects.count(), 1)
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, 'A new list item')
        # requ = HttpRequest()
        # requ.method = 'POST'
        # requ.POST['item_text'] = 'A new list item'
        #
        # rep = home_page(requ)
        #
        # self.assertEqual(Item.objects.count(), 1)
        # new_item = Item.objects.first()
        # self.assertEqual(new_item.text, 'A new list item')
        #
        # # The redirect part was split out into its own unit test:
        # # self.assertIn('A new list item', rep.content.decode())
        # # self.assertEqual(rep.status_code, 302)
        # # self.assertEqual(rep['location'], '/')

    def test_redirects_after_POST(self):
        rep = self.client.post('/list/new', data={'item_text': 'A new list item'})
        # self.assertEqual(rep.status_code, 302)
        new_list = List.objects.first()
        # assertRedirects is Django's combined status+target check
        self.assertRedirects(rep, '/list/%d/' % (new_list.id,))
        # self.assertRedirects(rep, '/list/the-only-list-in-the-world/')
        # Rewritten from:
        # requ = HttpRequest()
        # requ.method = 'POST'
        # requ.POST['item_text'] = 'A new list item'
        #
        # rep = home_page(requ)
        # self.assertEqual(rep.status_code, 302)
        # self.assertEqual(rep['location'], '/list/the-only-list-in-the-world/')

    def test_validation_error_are_sent_back_to_home_page_template(self):
        resp = self.client.post('/list/new', data={'item_text': ''})
        self.assertEqual(resp.status_code, 200)
        self.assertTemplateUsed(resp, 'listsss/home.html')
        ex_error = escape("You cant have an empty list item")
        print(resp.content.decode())
        self.assertContains(resp, ex_error)

    def test_invalid_list_items_arent_saved(self):
        self.client.post('/list/new', data={"item_text": ''})
        self.assertEqual(List.objects.count(), 0)
        self.assertEqual(Item.objects.count(), 0)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
daphnejwang/MentoreeMatch
refs/heads/master
{"/Project/endorsements.py": ["/Project/tabledef.py"], "/Project/email_module.py": ["/Project/tabledef.py"], "/Project/linkedin.py": ["/Project/tabledef.py"], "/Project/main.py": ["/Project/tabledef.py"], "/Project/search.py": ["/Project/tabledef.py"], "/Project/topic_seed.py": ["/Project/tabledef.py"]}
└── ├── Project │ ├── email_.py │ ├── email_module.py │ ├── endorsements.py │ ├── linkedin.py │ ├── main.py │ ├── mentorsearch.py │ ├── search.py │ ├── tabledef.py │ └── topic_seed.py └── server.py
/Project/email_.py
#import tabledef #from tabledef import User, MentoreeTopic, Topic import requests print requests # import pdb # def send_message(recipient, subject, text): # return requests.post( # "https://api.mailgun.net/v2/samples.mailgun.org/messages", # auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"), # data={"from": "Mentoree Match <mentoreematch@app27934969.mailgun.org>", # "to": recipient.email_address, # "subject": subject, # "text": "Testing some Mailgun awesomness!"}) def send_message(): # pdb.set_trace() print dir(requests) x = requests.post( "https://api.mailgun.net/v2/samples.mailgun.org/messages", auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"), data={"from": "Mentoree Match <mentoreematch@app27934969.mailgun.org>", "to": "Daphnejwang@gmail.com", "subject": "testing email", "text": "Testing some Mailgun awesomness!"}) return 'hi' # key = 'YOUR API KEY HERE' # sandbox = 'YOUR SANDBOX URL HERE' # recipient = 'YOUR EMAIL HERE' # request_url = 'https://api.mailgun.net/v2/{0}/messages'.format(sandbox) # request = requests.post(request_url, auth=('api', key), data={ # 'from': 'hello@example.com', # 'to': recipient, # 'subject': 'Hello', # 'text': 'Hello from Mailgun' # }) # print 'Status: {0}'.format(request.status_code) # print 'Body: {0}'.format(request.text) send_message()
/Project/email_module.py
import tabledef
from tabledef import User, MentoreeTopic, Topic, Email
import requests
import sqlalchemy
from sqlalchemy import update
import datetime
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
# import pdb


def save_email_info_to_database(sender, mentor, subject, subject_body):
    """Persist an Email row recording a message from sender to mentor."""
    today = datetime.datetime.now()
    email_info = tabledef.Email(sender_id=sender, receiver_id=mentor, subject=subject, text_body=subject_body, sent_date=today)
    print "!!~~~!!^^^ email info"
    print email_info
    tabledef.dbsession.add(email_info)
    return tabledef.dbsession.commit()


def send_email(sender_email, mentor_email, subject, subject_body):
    """Send the message through the Mailgun HTTP API.

    NOTE(review): the API key is hard-coded; move to config/environment.
    """
    return requests.post(
        "https://api.mailgun.net/v2/app27934969.mailgun.org/messages",
        auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
        data={"from": sender_email,
              "to": mentor_email,
              "subject": subject,
              "text": subject_body})


def get_email_history_per_mentor(linkedin_id):
    """Emails the logged-in user has sent to one specific mentor."""
    email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).filter_by(receiver_id=linkedin_id).all()
    return email_hist


def get_sent_email_history_per_sender():
    """All emails sent by the logged-in user."""
    email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).all()
    return email_hist


def get_email_history():
    """All emails received by the logged-in user (debug-prints subjects)."""
    email_hist = tabledef.dbsession.query(Email).filter_by(receiver_id=session['linkedin_id']).all()
    for mail in email_hist:
        print "~!@#$%^&*( email history!! !@#$%^&"
        print mail.subject
    return email_hist


def get_email_with_id(email_id):
    """Return the single Email row with the given primary key."""
    email_id = tabledef.dbsession.query(Email).filter_by(id=email_id).all()
    eid = email_id[0]
    return eid


def format_json(row):
    """Map a SQLAlchemy row to {column_name: str(value)} for JSON output."""
    formatted_json_dict = {}
    for column in row.__table__.columns:
        formatted_json_dict[column.name] = str(getattr(row, column.name))
    return formatted_json_dict


def delete_email(id):
    """Delete the Email row with the given primary key.

    NOTE(review): the parameter shadows the builtin ``id``.
    """
    deleted_email = tabledef.dbsession.query(Email).filter_by(id=id).first()
    tabledef.dbsession.delete(deleted_email)
    tabledef.dbsession.commit()

# Mailgun test call, preserved:
# return requests.post(
#     "https://api.mailgun.net/v2/app27934969.mailgun.org/messages",
#     auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
#     data={"from": "Excited User <me@samples.mailgun.org>",
#           "to": "daphnejwang@gmail.com",
#           "subject": "Hello",
#           "text": "Testing some Mailgun awesomness!"})
/Project/endorsements.py
import tabledef from tabledef import User, MentoreeTopic, Topic, Email, Endorsement import requests import sqlalchemy from sqlalchemy import update import datetime from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session # import pdb def save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body): today = datetime.datetime.now() endorsement_info = tabledef.Endorsement(sender_id=sender, receiver_id=mentor, title=endorsement_title, endorsements_text=endorsement_body, sent_date=today) print "!!~~~!!^^^ endorsement_info info" print endorsement_info tabledef.dbsession.add(endorsement_info) return tabledef.dbsession.commit() def get_endorsement_info_per_mentor(linkedin_id): endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=linkedin_id).all() # for endorsements in endorsement_hist: # print "!^^^^^^^^^^^^^^^^endorsement history!! ^^^^^^^^^^^^^^^^^^^^^" # print endorsements.sender.picture_url return endorsement_hist def get_endorsement_info_for_self(): profile_endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=session['linkedin_id']).all() for endorsements in profile_endorsement_hist: print "!^^^^^^^^^^^^^^^^endorsements_text!!^^^^^^^^^^^^^^^^" print endorsements.endorsements_text return profile_endorsement_hist
/Project/linkedin.py
"""LinkedIn OAuth integration and profile persistence (Python 2 / Flask)."""
from flask_oauthlib.client import OAuth
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
import jinja2
import tabledef
from tabledef import *
from sqlalchemy import update
from xml.dom.minidom import parseString
import os
import urllib
import json
from Project import app
import pdb
from tabledef import User

oauth = OAuth(app)

# NOTE(review): OAuth credentials are hard-coded in source; they should be
# moved to configuration/environment variables.
linkedin = oauth.remote_app(
    'linkedin',
    consumer_key='75ifkmbvuebxtg',
    consumer_secret='LAUPNTnEbsBu7axq',
    request_token_params={
        'scope': 'r_fullprofile,r_basicprofile,r_emailaddress',
        'state': 'RandomString',
    },
    base_url='https://api.linkedin.com/v1/',
    request_token_url=None,
    access_token_method='POST',
    access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
    authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
)


def authorized(resp):
    """OAuth callback: store the token, import the profile, return its JSON."""
    if resp is None:
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    session['linkedin_token'] = (resp['access_token'], '')
    linkedin_json_string = linkedin.get('people/~:(id,first-name,last-name,industry,headline,site-standard-profile-request,certifications,educations,summary,specialties,positions,picture-url,email-address)')
    session['linkedin_id'] = linkedin_json_string.data['id']
    tabledef.import_linkedin_user(linkedin_json_string.data)
    return jsonify(linkedin_json_string.data)


@linkedin.tokengetter
def get_linkedin_oauth_token():
    """Flask-OAuthlib hook: return the stored token for signed requests."""
    return session.get('linkedin_token')


def change_linkedin_query(uri, headers, body):
    """Pre-request hook: move the bearer token into a query parameter and
    ask the API for JSON responses."""
    auth = headers.pop('Authorization')
    headers['x-li-format'] = 'json'
    if auth:
        auth = auth.replace('Bearer', '').strip()
        if '?' in uri:
            uri += '&oauth2_access_token=' + auth
        else:
            uri += '?oauth2_access_token=' + auth
    return uri, headers, body


def save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):
    """First-time profile completion: update the user row and topic links."""
    tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=session['linkedin_id']).update({
        'mentor': mentoree_choice,
        'age': age_range,
        'gender': gender_input,
        'description': description_input,
        'new_user': False})
    for topics in mentor_topics:
        mentor_selected_topics = tabledef.MentoreeTopic(topic_id=topics, mentor_id=session['linkedin_id'])
        tabledef.dbsession.add(mentor_selected_topics)
    return tabledef.dbsession.commit()


def update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):
    """Profile edit: overwrite user fields and replace the selected topics."""
    user = tabledef.dbsession.query(User).filter_by(linkedin_id=session['linkedin_id']).first()
    user.mentor = mentoree_choice
    user.age = age_range
    user.gender = gender_input
    user.description = description_input
    # Delete the old topic links, then re-add the new selection.
    current_selected_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=session['linkedin_id']).all()
    for curr_topics in current_selected_topics:
        tabledef.dbsession.delete(curr_topics)
    # pdb.set_trace()
    for topics in mentor_topics:
        mentor_selected_topics = tabledef.MentoreeTopic(topic_id=topics, mentor_id=session['linkedin_id'])
        tabledef.dbsession.add(mentor_selected_topics)
    return tabledef.dbsession.commit()


# Install the query-rewriting hook on the remote app.
linkedin.pre_request = change_linkedin_query
/Project/main.py
from flask_oauthlib.client import OAuth
# `abort` added: search_results() called it but it was never imported,
# which raised NameError instead of a 404 for out-of-range pages.
from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session, abort
import jinja2
import tabledef
import search
from tabledef import User, MentoreeTopic, Topic
import linkedin
from xml.dom.minidom import parseString
from Project import app
import json
from flask import redirect
import pagination
import email_module
import endorsements

app.debug = True
app.secret_key = 'iLoveHelloKitty'

# Pagination
PER_PAGE = 5


def url_for_other_page(page, mentee_topic_choice):
    """Build a URL for the current endpoint pointing at another result page."""
    args = dict(request.view_args.items() + request.args.to_dict().items())
    args['page'] = page
    args['mentee_topic_choice'] = mentee_topic_choice
    return url_for(request.endpoint, **args)

app.jinja_env.globals['url_for_other_page'] = url_for_other_page


# LOGIN Pages
@app.route('/login')
def login():
    """Kick off the LinkedIn OAuth dance."""
    return linkedin.linkedin.authorize(callback=url_for('get_linkedin_data', _external=True))


@app.route('/logout')
def logout():
    """Drop the OAuth token and return to the home page."""
    session.pop('linkedin_token', None)
    return redirect(url_for('index'))


@app.route('/login/authorized')
@linkedin.linkedin.authorized_handler
def get_linkedin_data(resp):
    """OAuth callback: import the profile, route new users to the info form."""
    user_json = linkedin.authorized(resp)
    user_json = user_json.data
    user_string = json.loads(user_json)
    user = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=user_string["id"]).first()
    if user and user.new_user:
        return redirect(url_for('addinfo_page'))
    return redirect(url_for('index'))


# HOME & ACCOUNT CREATION Pages
@app.route('/')
def homepage():
    return render_template('home_page.html')


@app.route('/home')
def index():
    """Search landing page; requires an OAuth token in the session."""
    if 'linkedin_token' in session:
        me = linkedin.linkedin.get('people/~')
        jsonify(me.data)
        topics = tabledef.Topic.query.order_by("topic_id").all()
        return render_template('index.html', topics=topics)
    return redirect(url_for('login'))


@app.route('/additionalinfo', methods=["GET"])
def addinfo_page():
    return render_template('additionalinfo.html')


@app.route('/additionalinfo', methods=["POST"])
def addinfo():
    """Persist the additional-info form for the current user."""
    mentoree_choice = request.form.get('mentoree-radios')
    age_range = request.form.get('agerange')
    gender_input = request.form.get('gender_radios')
    description_input = request.form.get('description')
    mentor_topics = request.form.getlist('mentortopics')
    linkedin.save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics)
    return redirect(url_for('index'))


@app.route('/home', defaults={'page': 1}, methods=["POST"])
@app.route('/home/page/<int:page>/<mentee_topic_choice>')
def search_results(page, mentee_topic_choice=None):
    """Paginated mentor listing for the chosen topic."""
    mentee_topic_choice = mentee_topic_choice or request.form.get('searchtopics')
    mentor_data = search.search(mentee_topic_choice)
    if mentor_data:
        start_index = (page - 1) * PER_PAGE
        end_index = page * PER_PAGE
        ment_count = len(mentor_data)
        users = mentor_data[start_index:end_index]
        if not users and page != 1:
            abort(404)
        pagination_per_page = pagination.Pagination(page, PER_PAGE, ment_count)
        search_topic = search.search_topic_display(mentee_topic_choice)
        return render_template('searchresults.html', search_topic_display=search_topic,
                               pagination=pagination_per_page, users=users,
                               mentee_topic_choice=mentee_topic_choice)
    flash('Sorry! There are no mentors under this search topic')
    return redirect(url_for('index'))


# MENTOR DETAIL PAGES
@app.route('/mentor_detail/<linkedin_id>', methods=["GET"])
def mentor_page(linkedin_id):
    """Public profile of one mentor, with their endorsement history."""
    ment_data = search.mentor_detail_display(linkedin_id)
    user_data = search.mentor_detail_display(session['linkedin_id'])
    endorsement_history = endorsements.get_endorsement_info_per_mentor(linkedin_id)
    return render_template('mentor_detail.html', ment_data=ment_data, user_data=user_data,
                           endorsement_history=endorsement_history)


@app.route('/mentor_detail', methods=["POST"])
def add_endorsement():
    """Store an endorsement written by the logged-in user for a mentor."""
    sender = session['linkedin_id']
    mentor = request.form.get('mentor_id')
    endorsement_title = request.form.get('endorsement_title')
    endorsement_body = request.form.get('endorsement_txt')
    endorsements.save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body)
    return redirect(url_for('mentor_page', linkedin_id=mentor))


# SELF PROFILE PAGES
@app.route('/profile', methods=["GET"])
def self_page():
    """The logged-in user's own profile page."""
    if 'linkedin_id' in session:
        ment_data = search.mentor_detail_display(session['linkedin_id'])
        profile_endorsement_hist = endorsements.get_endorsement_info_for_self()
        return render_template('self_profile.html', ment_data=ment_data,
                               profile_endorsement_hist=profile_endorsement_hist)
    return redirect(url_for('login'))


@app.route('/profile', methods=["POST"])
def update_self_page():
    """Re-render the profile after a refresh request."""
    if 'linkedin_id' in session:
        ment_data = search.mentor_detail_display(session['linkedin_id'])
        # NOTE(review): update_linkedin_user expects the LinkedIn payload
        # but is called without arguments here -- confirm what this POST
        # is meant to refresh.
        update_data = tabledef.update_linkedin_user()
        return render_template('self_profile.html', ment_data=ment_data)
    return redirect(url_for('self_page'))


@app.route('/edit_profile', methods=["GET"])
def mentor_page_update():
    """Edit form for the logged-in user's profile."""
    if 'linkedin_id' in session:
        ment_data = search.mentor_detail_display(session['linkedin_id'])
        ment_pers_topics = search.mentor_personal_topics(session['linkedin_id'])
        topics = tabledef.Topic.query.order_by("topic_id").all()
        return render_template('edit_self_profile.html', ment_data=ment_data,
                               ment_pers_topics=ment_pers_topics, topics=topics)
    return redirect(url_for('login'))


@app.route('/edit_profile', methods=["POST"])
def mentor_page_update_post():
    """Apply the submitted profile edits, then return to the profile."""
    mentoree_choice = request.form.get('mentoree-radios')
    age_range = request.form.get('agerange')
    gender_input = request.form.get('gender_radios')
    description_input = request.form.get('description')
    mentor_topics = request.form.getlist('mentortopics')
    linkedin.update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics)
    return redirect(url_for('self_page'))


# EMAIL FORM Page
@app.route('/email/<linkedin_id>', methods=["GET"])
def email_get(linkedin_id):
    """Compose form plus prior conversation with this mentor."""
    ment_data = search.mentor_detail_display(linkedin_id)
    user_data = search.mentor_detail_display(session['linkedin_id'])
    email_history = email_module.get_email_history_per_mentor(linkedin_id)
    return render_template('email_form.html', ment_data=ment_data, user_data=user_data,
                           email_history=email_history)


@app.route('/email', methods=["POST"])
def email_post():
    """Store and deliver a message from the logged-in user to a mentor."""
    sender = session['linkedin_id']
    sender_email = search.mentor_detail_display(sender).email
    mentor = request.form.get('mentor_id')
    mentor_email = search.mentor_detail_display(mentor).email
    subject = request.form.get('subject')
    subject_body = request.form.get('message')
    email_module.save_email_info_to_database(sender, mentor, subject, subject_body)
    email_module.send_email(sender_email, mentor_email, subject, subject_body)
    # flash() returns None; the old code forwarded that None as a useless
    # `messages` query parameter.
    flash('Success! Your message has been sent successfully.')
    return redirect(url_for('email_get', linkedin_id=mentor))


# EMAIL INBOX Page
@app.route('/email_history', methods=["GET"])
def email_history():
    """Received-mail listing."""
    user_data = search.mentor_detail_display(session['linkedin_id'])
    email_history = email_module.get_email_history()
    return render_template('email_history.html', user_data=user_data, email_history=email_history)


@app.route('/email_sent_history', methods=["GET"])
def email_sent_history():
    """Sent-mail listing."""
    user_data = search.mentor_detail_display(session['linkedin_id'])
    email_history = email_module.get_sent_email_history_per_sender()
    return render_template('email_sent_history.html', user_data=user_data, email_history=email_history)


@app.route('/email_detail/<email_id>', methods=["GET"])
def email_detail(email_id):
    """Return one email as a JSON payload for the inbox detail view."""
    eid = email_module.get_email_with_id(email_id)
    email_selected = {
        "id": eid.id,
        "receiver_id": eid.receiver_id,
        "sender_id": eid.sender_id,
        "sent_date": eid.sent_date.strftime("%d/%m/%Y"),
        "subject": eid.subject,
        "text_body": eid.text_body,
        "sender": {
            "first_name": eid.sender.first_name,
            "last_name": eid.sender.last_name,
        },
    }
    return json.dumps(email_selected)


@app.route('/delete_email/<int:id>', methods=["GET"])
def delete_email(id):
    """Delete an email; only for logged-in users."""
    if 'linkedin_id' not in session:
        return 'error'
    email_module.delete_email(id)
    return str(id)


@app.route('/about', methods=["GET"])
def about_us():
    return render_template('about_us.html')
/Project/mentorsearch.py
# from flask import Flask, render_template, redirect, request, flash, url_for, session # import jinja2 # import tabledef # from tabledef import Users, MentorCareer, MentorSkills # from xml.dom.minidom import parseString # import os # import urllib # app = Flask(__name__) # app.secret_key = "topsecretkey" # app.jinja_env.undefined = jinja2.StrictUndefined # @app.route("/") # def index(): # print "hello" # return "hello" # @app.route("/login", methods=["GET"]) # def get_userlogin(): # error = None # f = urllib.urlopen("http://127.0.0.1:5000/login") # print "!~~~~!~~~~!" # print f.read() # # url = os.environ['HTTP_HOST'] # # xmlDoc = parseString(url) # # print xmlDoc # # linkedin_auth = {} # return render_template("login.html", error = error) # @app.route("/login", methods=["POST"]) # def login_user(): # found_user = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first() # print "found user", found_user # error = None # if found_user: # print "User found" # session['user'] = found_user.id # return redirect("/") # else: # print "User not found" # #flash('Invalid username/password.') # error = "Invalid Username" # return render_template('login.html', error = error) # # return redirect("/") # @app.route("/create_newuser", methods=["GET"]) # def get_newuser(): # return render_template("newuser.html") # @app.route("/create_newuser", methods=["POST"]) # def create_newuser(): # # print "SESSION", tabledef.dbsession # user_exists = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first() # print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" # print "USER EXISTS", user_exists # if user_exists != None: # flash(" User already exists. 
Please login") # return redirect("/create_newuser") # else: # user = User(email=request.form['email'], password= request.form['password'], age=request.form['age'], sex=request.form['sex'], occupation=request.form['occupation'], zipcode=request.form['zipcode']) # tabledef.dbsession.add(user) # tabledef.dbsession.commit() # flash("Successfully added new user!") # return redirect("/") # if __name__ == "__main__": # app.run(debug = True)
/Project/search.py
from flask_oauthlib.client import OAuth from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session import jinja2 import tabledef from tabledef import User, MentoreeTopic, Topic import linkedin from xml.dom.minidom import parseString import pdb # from Project import app def search(searchtopics): search_results=tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all() return search_results def search_topic_display(searchtopics): search_results=tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all() search_topic = tabledef.dbsession.query(tabledef.Topic).filter_by(topic_id=search_results[0].topic_id).first() search_topic_title = search_topic.title print search_topic_title return search_topic_title def mentor_detail_display(linkedin_id): # pdb.set_trace() ment_data = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=linkedin_id).first() # print "!!~~~~~~~~~~~ment_data.positions[0].positions_title~~~~~~~~~~~~~~~~~~~~~~!!" # print ment_data.positions[0].positions_title # ment_data.positions.positions_title return ment_data def mentor_personal_topics(linkedin_id): # pdb.set_trace() ment_pers_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=linkedin_id).all() # for topics in ment_pers_topics: # print "((((((~~~~~~~~~~~topics.topic_id~~~~~~~~~~~~~~~~~~~~~~))" # print topics.topic_id return ment_pers_topics
/Project/tabledef.py
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Boolean, Text, DateTime
from sqlalchemy.orm import sessionmaker
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker, scoped_session
import pdb
import os

DATABASE_URL = os.environ.get("DATABASE_URL", "sqlite:///mentoring.db")

engine = create_engine(DATABASE_URL, echo=False)
dbsession = scoped_session(sessionmaker(bind=engine, autocommit=False, autoflush=False))

Base = declarative_base()
Base.query = dbsession.query_property()


### Class declarations
class User(Base):
    """A member; keyed by LinkedIn id so a person can never be duplicated."""
    __tablename__ = "users"

    linkedin_id = Column(String(50), primary_key=True)
    linkedintoken = Column(String(50), nullable=True)
    new_user = Column(Boolean, nullable=True)
    first_name = Column(String(64), nullable=True)
    last_name = Column(String(64), nullable=True)
    email = Column(String(255), nullable=True)
    #~~~# Data from the additional-info page
    mentor = Column(Boolean, nullable=True)
    age = Column(String(50), nullable=True)
    gender = Column(String(50), nullable=True)
    description = Column(String(1000), nullable=True)
    #~~~# Profile data imported from LinkedIn
    industry = Column(String(64), nullable=True)
    headline = Column(String(100), nullable=True)
    picture_url = Column(String(200), nullable=True)
    certifications = Column(String(200), nullable=True)
    summary = Column(String(500), nullable=True)
    educations = relationship("Education")
    positions = relationship("Position")


def _extract_auth_token(data):
    """Pull the authToken value out of siteStandardProfileRequest, if any."""
    token = data.get('siteStandardProfileRequest', None)
    if token is None:
        return None
    token_data = token['url']
    start = token_data.find('authToken=') + 10
    end = token_data.find('=api', start)
    return token_data[start:end]


def _populate_user(user, data):
    """Copy the scalar LinkedIn profile fields from the API payload onto user."""
    user.linkedin_id = data.get('id', None)
    user.linkedintoken = _extract_auth_token(data)
    user.first_name = data.get('firstName', None)
    user.last_name = data.get('lastName', None)
    user.email = data.get('emailAddress', None)
    user.industry = data.get('industry', None)
    user.headline = data.get('headline', None)
    cert = data.get('certifications', None)
    if cert is not None:
        user.certifications = cert['values'][0]['name']
    user.summary = data.get('summary', None)
    user.picture_url = data.get('pictureUrl', None)


def _education_models(linkedin_id, data):
    """Build Education rows from the payload's 'educations' section."""
    models = []
    # `or {}` / `or []`: the payload may omit these keys entirely; the
    # original crashed with AttributeError in that case.
    educations = data.get('educations', None) or {}
    for entry in educations.get('values', None) or []:
        education = Education()
        education.linkedin_id = linkedin_id
        if 'startDate' in entry:
            education.educations_start_year = entry['startDate']['year']
        if 'endDate' in entry:
            education.educations_end_year = entry['endDate']['year']
        if 'schoolName' in entry:
            education.educations_school_name = entry['schoolName']
        if 'fieldOfStudy' in entry:
            education.educations_field_of_study = entry['fieldOfStudy']
        if 'degree' in entry:
            education.educations_degree = entry['degree']
        models.append(education)
    return models


def _position_models(linkedin_id, data):
    """Build Position rows from the payload's 'positions' section."""
    models = []
    positions = data.get('positions', None) or {}
    for entry in positions.get('values', None) or []:
        position = Position()
        position.linkedin_id = linkedin_id
        if 'startDate' in entry:
            position.positions_start_year = entry['startDate']['year']
        if 'endDate' in entry:
            position.positions_end_year = entry['endDate']['year']
        if 'title' in entry:
            position.positions_title = entry['title']
        if 'company' in entry and 'name' in entry['company']:
            position.positions_company_name = entry['company']['name']
        models.append(position)
    return models


def import_linkedin_user(data):
    """Create a User (plus education/position/topic rows) from a LinkedIn
    payload, committing only when the id has not been seen before.

    Returns the populated (possibly unsaved) User instance.
    """
    user = User()
    _populate_user(user, data)
    user.new_user = True
    education_models = _education_models(user.linkedin_id, data)
    position_models = _position_models(user.linkedin_id, data)
    # The original assigned a nonexistent `linkedin_id` attribute here and
    # persisted a row with a NULL mentor_id; mentor_id is the actual FK.
    mentor_topics = MentoreeTopic(mentor_id=user.linkedin_id)
    existing_user = dbsession.query(User).filter_by(linkedin_id=user.linkedin_id).first()
    if existing_user is None:
        dbsession.add(user)
        dbsession.add(mentor_topics)
        for model in education_models:
            dbsession.add(model)
        for model in position_models:
            dbsession.add(model)
        dbsession.commit()
    return user


def update_linkedin_user(data):
    """Refresh an existing User row from a fresh LinkedIn payload.

    The original looked the user up via `tabledef.User` and a flask
    `session`, neither of which exists in this module (NameError); the id
    carried in the payload itself is used instead. Returns the User, or
    None when no matching row exists.
    """
    user = dbsession.query(User).filter_by(linkedin_id=data.get('id', None)).first()
    if user is None:
        return None
    _populate_user(user, data)
    # new_user is deliberately left untouched on update.
    for model in _education_models(user.linkedin_id, data):
        dbsession.add(model)
    for model in _position_models(user.linkedin_id, data):
        dbsession.add(model)
    dbsession.commit()
    return user


class Education(Base):
    """One LinkedIn education entry belonging to a user."""
    __tablename__ = "educations"

    id = Column(Integer, primary_key=True)
    linkedin_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=True)
    educations_start_year = Column(Integer, nullable=True)
    educations_end_year = Column(Integer, nullable=True)
    educations_school_name = Column(String(200), nullable=True)
    educations_field_of_study = Column(String(200), nullable=True)
    educations_degree = Column(String(200), nullable=True)


class Position(Base):
    """One LinkedIn work-position entry belonging to a user."""
    __tablename__ = "positions"

    id = Column(Integer, primary_key=True)
    linkedin_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=True)
    positions_start_year = Column(Integer, nullable=True)
    positions_end_year = Column(Integer, nullable=True)
    positions_company_name = Column(String(200), nullable=True)
    positions_industry = Column(String(200), nullable=True)
    positions_title = Column(String(200), nullable=True)


class MentoreeTopic(Base):
    """Association row: a mentor offers mentoring on a topic."""
    __tablename__ = "mentoree_topics"

    id = Column(Integer, primary_key=True)
    topic_id = Column(Integer, ForeignKey('topics.topic_id'), nullable=True)
    mentor_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=True)
    ment_user = relationship("User", backref=backref("mentoree_topics", order_by=id))


class Topic(Base):
    """A mentoring topic available for search."""
    __tablename__ = "topics"

    topic_id = Column(Integer, primary_key=True)
    title = Column(String(100), nullable=True)


class Endorsement(Base):
    """An endorsement written by one user about another."""
    __tablename__ = "endorsements"

    id = Column(Integer, primary_key=True)
    sender_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=False)
    receiver_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=False)
    title = Column(String(100), nullable=True)
    endorsements_text = Column(String(500), nullable=True)
    sent_date = Column(DateTime, nullable=True)
    sender = relationship("User", primaryjoin="User.linkedin_id==Endorsement.sender_id")
    receiver = relationship("User", primaryjoin="User.linkedin_id==Endorsement.receiver_id")


class Email(Base):
    """An in-app message between two users."""
    __tablename__ = "emails"

    id = Column(Integer, primary_key=True)
    sender_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=False)
    receiver_id = Column(String(50), ForeignKey('users.linkedin_id'), nullable=False)
    subject = Column(String(100), nullable=True)
    text_body = Column(String(50000), nullable=True)
    sent_date = Column(DateTime, nullable=True)
    sender = relationship("User", primaryjoin="User.linkedin_id==Email.sender_id")
    receiver = relationship("User", primaryjoin="User.linkedin_id==Email.receiver_id")


class Quote(Base):
    """A motivational quote shown in the UI."""
    __tablename__ = "quotes"

    id = Column(Integer, primary_key=True)
    quote_author = Column(String(100), nullable=True)
    quote = Column(String(10000), nullable=True)


def createTable():
    """Create every table declared above (no-op if they already exist)."""
    Base.metadata.create_all(engine)


def main():
    """In case we need this for something"""
    pass

if __name__ == "__main__":
    main()
/Project/topic_seed.py
import tabledef from tabledef import Topic TOPICS = {1: "Arts & Crafts", 2: "Career & Business", 3: "Community & Environment", 4: "Education & Learning", 5: "Fitness", 6: "Food & Drinks", 7: "Health & Well Being", 8: "Language & Ethnic Identity", 9: "Life Experiences", 10: "Literature & Writing", 11: "Motivation", 12: "New Age & Spirituality", 13: "Outdoors & Adventure", 14: "Parents & Family", 15: "Peer Pressure", 16: "Pets & Animals", 17: "Religion & Beliefs", 18: "Self-improvement/Growth", 19: "Sports & Recreation", 20: "Support", 21: "Tech", 22: "Women"} def seed_topic_table(): topics = [] for items in TOPICS: topics.append(Topic(title=TOPICS[items])) print "~~~~~ TOPICS ~~~~~~~" print topics tabledef.dbsession.add_all(topics) tabledef.dbsession.commit() seed_topic_table()
/server.py
from Project import app # app.run(debug=True) app.run(debug=True) app.secret_key = 'development'
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
enverbashirov/YOLOv3-mMwave-Radar
refs/heads/master
{"/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/truth.py", "/dataprep/processing.py"], "/yolo/predict.py": ["/yolo/darknet.py"], "/yolo/train.py": ["/yolo/darknet.py"]}
└── ├── __main__.py ├── dataprep │ ├── __init__.py │ ├── channel_extraction.py │ ├── kalman_tracker.py │ ├── processing.py │ ├── truth.py │ └── util.py ├── test.py └── yolo ├── __init__.py ├── darknet.py ├── dataset.py ├── predict.py ├── train.py └── util.py
/__main__.py
import argparse import sys import yolo import dataprep def parse_arg(): parser = argparse.ArgumentParser(description='mmWave YOLOv3', add_help=True, usage='''python . <action> [<args>] Actions: train Network training module predict Object detection module dataprep Data preprocessing module ''' ) parser.add_argument('Action', type=str, help='Action to run') return parser.parse_args(sys.argv[1:2]) args = parse_arg() if args.Action == 'train' or args.Action == 'predict': yolo.main(args) elif args.Action == 'dataprep': dataprep.main() else: print('Unknown action. Check "python . --help"')
/dataprep/__init__.py
import argparse import sys, gc from .channel_extraction import chext from .processing import proc from .truth import truth def parse_arg(): parser = argparse.ArgumentParser(description='Data preprocessing module', add_help=True) parser.add_argument('--pathin', type=str, required=True, help="Path for the input folder") parser.add_argument('--pathout', type=str, help="Path for the output folder") parser.add_argument('--saveprefix', type=str, help="Prefix for the save file") parser.add_argument('--chext', action='store_true', help="Perform channel extraction") parser.add_argument('--proc', action='store_true', help="Perform signal processing (FFT and denoising)") parser.add_argument('--truth', action='store_true', help="Perform ground truth (clustering, tracking) bouding box calculations") parser.add_argument('--objcount', type=int, default=1, help="Number of objects per image (default: 1)") parser.add_argument('--reso', type=int, default=416, help="Input image resolution (def: 416)") parser.add_argument('--v', type=int, default=0, help="Verbose (0 minimal (def), 1 normal, 2 all") return parser.parse_args(sys.argv[2:]) def main(): args = parse_arg() if args.chext: chext(args) gc.collect() if args.proc: proc(args) gc.collect() if args.truth: truth(args) gc.collect()
/dataprep/channel_extraction.py
import h5py import numpy as np import os, shutil def chext(args): rawpath = f'raw/{args.pathin}' savepath = f'dataset/{args.pathout}/chext' if args.pathout else f'dataset/{args.pathin}/chext' print(f'[LOG] ChExt | Starting: {args.pathin}') # Create the subsequent save folders # if os.path.isdir(savepath): # shutil.rmtree(savepath) if not os.path.isdir(savepath): os.makedirs(savepath) for i, fname in enumerate(os.listdir(rawpath)): logprefix = f'[LOG] ChExt | {i+1} / {len(os.listdir(rawpath))}' savename = f'{args.saveprefix}_seq_{i}' if args.saveprefix else f'{fname.split("_")[0]}_seq_{fname.split("_")[1].split(".")[0]}' print(f'{logprefix} fname', end='\r') channel_extraction( f'{rawpath}/{fname}', savepath, savename, action='SAVE', logprefix=logprefix) print('\n') def channel_extraction(loadpath, savepath, savename, action, logprefix='', nr_chn=16): with h5py.File(loadpath, 'r+') as h5data: print(f'{logprefix} Initializing: {loadpath}', end='\r') Data = np.zeros((h5data['Chn1'].shape[1], nr_chn, h5data['Chn1'].shape[0]), dtype=np.float32) for i in range(nr_chn): print(f'{logprefix} Extracting channel {i+1} \t\t\t', end='\r') channel = np.asarray(h5data['Chn{}'.format(i+1)]) Data[:, i, :] = channel.T print(f'{logprefix} Finalizing {savepath}', end='\r') if action == 'SAVE': print(f'{logprefix} Saving', end='\r') np.save(f'{savepath}/{savename}', Data) print(f'{logprefix} Saved: {savepath}/{savename} Data shape: {Data.shape}') elif action == 'RETURN': return Data else: print(f'[ERR] ChExt | Invalid action, please select SAVE or RETURN')
/dataprep/kalman_tracker.py
# matplotlib is not used by any code in this module; importing it lazily
# keeps the tracker usable on headless/minimal installs while preserving
# the `animation`/`plt` names for interactive debugging sessions.
try:
    import matplotlib.animation as animation
    from matplotlib import pyplot as plt
except ImportError:  # pragma: no cover - optional plotting dependency
    animation = None
    plt = None
import numpy as np
import scipy as sp
# Bug fix: `import scipy as sp` does NOT import the scipy.optimize
# submodule, so the original `sp.optimize.linear_sum_assignment` call
# failed at runtime; import the function explicitly.
from scipy.optimize import linear_sum_assignment


class KalmanTracker:
    """Constant-velocity Kalman filter plus bookkeeping for multi-target
    tracking of radar detections in (x, y)."""

    def __init__(self, id_, s0=None, disable_rejection_check=False):
        # Filter-related parameters
        self.dt = 66.667e-3  # T_int of the radar TX
        # state transition matrix (block-diagonal constant-velocity model
        # over [x, vx, y, vy])
        self.F = np.kron(np.eye(2), np.array([[1, self.dt], [0, 1]]))
        # state-acceleration matrix
        self.G = np.array([0.5*(self.dt**2), self.dt]).reshape(2, 1)
        # observation matrix (observe x and y positions only)
        self.H = np.array([[1, 0, 0, 0],
                           [0, 0, 1, 0]])
        # measurement covariance matrix
        self.R = np.array([[0.5, 0], [0, 0.5]])  # [wagner2017radar]
        # initial state covariance
        self.P = 0.2*np.eye(4)
        # state noise variance
        self.sigma_a = 8  # [wagner2017radar]
        # state noise covariance
        self.Q = np.kron(np.eye(2), np.matmul(self.G, self.G.T)*self.sigma_a**2)
        self.n = self.F.shape[1]
        self.m = self.H.shape[1]
        # initial state
        self.s = np.zeros((self.n, 1)) if s0 is None else s0
        self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
        self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2),
                                np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
        # chi-squared gate for innovation-based observation rejection
        self.REJECT_THR = 4.605
        self.disable_rejection_check = disable_rejection_check
        #########################################################
        # Tracker-related parameters
        self.misses_number = 0
        self.hits = 0
        self.id = id_
        self.box = np.array([])
        self.state_memory = []
        self.identity_label = 'UNK'  # initialize as unknown cluster
        self.id_dict = {-1: 'UNK', 0: 'S1', 1: 'S2', 2: 'S3', 3: 'S4'}
        # self.id_dict = {-1: 'UNK', 0: 'JP', 1: 'FM', 2:'GP', 3:'RF'}

    def transform_obs(self, z):
        """Convert a polar observation [r, theta] to Cartesian [x, y]."""
        z_prime = np.array([z[0]*np.cos(z[1]),
                            z[0]*np.sin(z[1])]).reshape(-1, 1)
        return z_prime

    def reject_obs(self, i, S):
        """Gate test: True when the innovation `i` is implausible given
        the innovation covariance S (chi-squared above REJECT_THR)."""
        chi_squared = np.matmul(np.matmul(i.T, np.linalg.inv(S)), i)[0, 0]
        return chi_squared >= self.REJECT_THR

    def predict(self):
        """Propagate the state one radar interval ahead; returns (s, xy)."""
        self.s = np.matmul(self.F, self.s)
        # check that x has the correct shape
        assert self.s.shape == (self.n, 1)
        self.P = np.matmul(np.matmul(self.F, self.P), self.F.T) + self.Q
        self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
        self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2),
                                np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
        return self.s, self.xy

    def update(self, z):
        """Fuse a polar observation z = [r, theta] into the state.

        Observations failing the chi-squared gate are ignored (unless
        disable_rejection_check is set). Returns (s, xy) either way.
        """
        z = self.transform_obs(z)
        # innovation
        y = z - np.matmul(self.H, self.s)
        S = np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R
        if (not self.reject_obs(y, S)) or self.disable_rejection_check:
            # Kalman gain
            K = np.matmul(np.matmul(self.P, self.H.T), np.linalg.inv(S))
            self.s = self.s + np.matmul(K, y)
            assert self.s.shape == (self.n, 1)
            self.P = np.matmul(np.eye(self.n) - np.matmul(K, self.H), self.P)
            self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
            self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2),
                                    np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
            self.state_memory.append(self.xy)
            return self.s, self.xy
        else:
            self.state_memory.append(self.xy)
            return self.s, self.xy

    def get_S(self):
        """Return the current innovation covariance H P H^T + R."""
        return np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R

    @staticmethod
    def get_mahalanobis_distance(x, C):
        # returns Mahalanobis distance given the difference vector x and covariance C
        return np.matmul(np.matmul(x.T, np.linalg.inv(C)), x)[0, 0]

    @staticmethod
    def hungarian_assignment(score_matrix):
        """Optimal detection-to-tracker assignment (Hungarian algorithm).

        Returns (matches, undetected_trackers, unmatched_detections) where
        matches is an (k, 2) array of [detection, tracker] index pairs.
        """
        # call the scipy implementation of Hungarian alg.
        det_idx, tr_idx = linear_sum_assignment(score_matrix)
        unmatched, undetected = [], []
        for t in range(score_matrix.shape[1]):
            if t not in tr_idx:
                undetected.append(t)
        for d in range(score_matrix.shape[0]):
            if d not in det_idx:
                unmatched.append(d)
        matches = []
        for d, t in zip(det_idx, tr_idx):
            matches.append(np.array([d, t]).reshape(1, 2))
        if len(matches) == 0:
            matches = np.empty((0, 2), dtype=int)
        else:
            matches = np.concatenate(matches, axis=0)
        return matches, np.array(undetected), np.array(unmatched)
/dataprep/processing.py
import os, shutil, gc
from argparse import ArgumentParser
from time import sleep

import h5py
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import io, signal
from scipy.signal.windows import nuttall, taylor

from .util import *


def proc(args):
    """Turn channel-extracted raw radar captures into Range-Doppler-Angle cubes.

    For every file in ``dataset/<pathin>/chext`` the capture is split into 4
    parts, each part into radar cubes of N_loop chirps; each cube is FFT'd
    along range, Doppler and angle, converted to dB power, and saved twice:
    the raw range-angle maps under ``proc/raw`` and the thresholded/denoised
    RDA cubes under ``proc/denoised``.

    Args:
        args: namespace with ``pathin``, ``pathout`` (optional) and
            ``saveprefix`` (optional) attributes.
    """
    rawpath = f'dataset/{args.pathin}/chext'
    savepath = f'dataset/{args.pathout}/proc' if args.pathout else f'dataset/{args.pathin}/proc'
    print(f'[LOG] Proc | Starting: {args.pathin}')

    # Create the subsequent save folders (kept if they already exist)
    if not os.path.isdir(savepath):
        os.makedirs(savepath + '/raw/')
        os.mkdir(savepath + '/denoised/')

    # # # PARAMETERS INIT # # #
    c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12)  # speed of light
    f_start = 76e9
    f_stop = 78e9
    Tp = 250e-6
    N = 512
    N_loop = 256
    kf = 1.1106e13
    BrdFuSca = 4.8828e-5
    fs = 2.8571e6
    fc = (f_start + f_stop)/2

    # # # CONFIGURE SIGNAL PROCESSING # # #
    # # Range dimension
    NFFT = 2**10   # number of fft points in range dim
    nr_chn = 16    # number of channels
    # fft will be computed using a hanning window to lower border effects
    win_range = np.broadcast_to(np.hanning(N-1), (N_loop, nr_chn, N-1)).T
    # integral of the window for normalization
    sca_win = np.sum(win_range[:, 0, 0])
    v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf)  # vector of range values for each range bin
    r_min = 0    # min range considered
    r_max = 10   # max range considered
    arg_rmin = np.argmin(np.abs(v_range - r_min))  # index of the min range considered value
    arg_rmax = np.argmin(np.abs(v_range - r_max))  # index of the max range considered value
    vrange_ext = v_range[arg_rmin:arg_rmax+1]      # vector of range values from rmin to rmax

    # # Doppler dimension
    NFFT_vel = 256  # number of fft points in Doppler dim
    win_vel = np.broadcast_to(np.hanning(N_loop).reshape(1, 1, -1),
                              (vrange_ext.shape[0], nr_chn, N_loop))
    scawin_vel = np.sum(win_vel[0, 0, :])
    vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp)  # considered frequencies in Doppler dim
    v_vel = vfreq_vel*c0/(2*fc)                   # transform freqs into velocities
    v_vel = np.delete(v_vel, np.arange(124, 132))  # delete velocities close to 0

    # # Angle dimension
    NFFT_ant = 64  # number of fft points in angle dim
    win_ant = np.broadcast_to(taylor(nr_chn, nbar=20, sll=20).reshape(1, -1, 1),
                              (vrange_ext.shape[0], nr_chn, NFFT_vel))
    scawin_ant = np.sum(win_ant[0, :, 0])

    # load complex calibration weights for each antenna element
    cal_data = io.loadmat('dataprep/calibration.mat')['CalData']
    cal_data = cal_data[:16]  # keep weights for TX1 only
    mcal_data = np.broadcast_to(cal_data, (N-1, cal_data.shape[0], N_loop))

    # # # PROCESS THE RDA SLICES FOR EACH FRAME # # #
    for i, fname in enumerate(os.listdir(rawpath)):
        frawname = fname.split('.')[0]
        logprefix = f'[LOG] Proc | {i+1} / {len(os.listdir(rawpath))} {frawname}'
        print(f'{logprefix} {fname}', end='\r')
        Data_orig = np.load(f'{rawpath}/{fname}')

        parts = [0, 1, 2, 3]
        SIDELOBE_LEVEL = 3
        LINTHR_HIGH = -97
        LINTHR_LOW = -107

        for part in parts:  # split processing in parts for memory, each track is split in 4
            savename = f'{args.saveprefix}_seq_{frawname.split("_")[2]}_sub_{part}' \
                if args.saveprefix else f'{frawname}_sub_{part}'
            logprefix = f'[LOG] Proc | {i*len(parts)+part+1} / {len(os.listdir(rawpath))*len(parts)} {frawname}'
            print(f'{logprefix} {savename}', end='\r')

            Data = Data_orig[:, :, part*32000:(part+1)*32000]  # each part has 32k blocks (128k/4)
            # BUG FIX: `np.int` was removed in NumPy 1.24; use the builtin `int`.
            split_locs = np.arange(Data.shape[2], step=N_loop, dtype=int)[1:]
            Data = np.stack(np.split(Data, split_locs, axis=2)[:-1], axis=-1)  # sequence of radar cubes
            print(f'{logprefix} Time-split \t\t\t', end='\r')

            nsteps = Data.shape[-1]  # last dim is time
            rda_data = np.zeros((len(vrange_ext), NFFT_ant, NFFT_vel, nsteps), dtype=np.float32)
            raw_ra = np.zeros((len(vrange_ext), NFFT_ant, nsteps), dtype=np.float32)

            for j in range(nsteps):  # loop on the timesteps
                print(f'{logprefix} Timestep: {j+1} \t\t\t', end='\r')
                RawRadarCube = Data[1:, :, :, j]

                # Range fft: window, calibration and scaling are applied
                range_profile = np.fft.fft(RawRadarCube*win_range*mcal_data, NFFT, axis=0)*BrdFuSca/sca_win
                rp_ext = range_profile[arg_rmin:arg_rmax+1]  # extract only ranges of interest (0 to 10 m)
                # background subtraction for MTI
                rp_ext -= np.mean(rp_ext, axis=2, keepdims=True)

                # Doppler fft
                range_doppler = np.fft.fftshift(np.fft.fft(rp_ext*win_vel, NFFT_vel, axis=2)/scawin_vel, axes=2)
                # Angle fft
                range_angle_doppler = np.fft.fftshift(np.fft.fft(range_doppler*win_ant, NFFT_ant, axis=1)/scawin_ant, axes=1)
                # absolute value + 20log10 to compute power
                range_angle_doppler = 20*np.log10(np.abs(range_angle_doppler))

                raw_ra[..., j] = range_angle_doppler.max(2)  # store raw range-angle image

                # denoising: subtract a range-dependent threshold and clip at 0
                rdep_thr = np.linspace(LINTHR_HIGH, LINTHR_LOW, range_angle_doppler.shape[0]).reshape((-1, 1, 1))
                range_angle_doppler -= rdep_thr
                range_angle_doppler[range_angle_doppler < 0] = 0

                # suppress angle sidelobes below (per-range/Doppler max - SIDELOBE_LEVEL)
                maxs = np.max(range_angle_doppler, axis=1).reshape(range_angle_doppler.shape[0], 1, range_angle_doppler.shape[2])
                threshold = maxs - SIDELOBE_LEVEL
                range_angle_doppler[range_angle_doppler < threshold] = 0

                rda_data[..., j] = range_angle_doppler

            print(f'{logprefix} Saving: {savename} \t\t\t')
            np.save(f'{savepath}/denoised/{savename}.npy', rda_data)
            np.save(f'{savepath}/raw/{savename}.npy', raw_ra)
            del Data, rda_data, split_locs, raw_ra
            gc.collect()

        del Data_orig
        gc.collect()
        print('\n')
/dataprep/truth.py
import os
# import shutil, time, pickle
# from argparse import ArgumentParser
# import matplotlib
import matplotlib.patches as patches
from matplotlib import pyplot as plt
# from matplotlib import rc
import numpy as np
from sklearn.cluster import DBSCAN

# from .channel_extraction import ChannelExtraction
from .util import Cluster, deg2rad_shift, get_box
from .kalman_tracker import KalmanTracker


def truth(args):
    """Build ground-truth bounding-box images from processed RDA sequences.

    For each denoised RDA file under ``dataset/<pathin>/proc``: threshold the
    map, cluster the surviving points with DBSCAN, track cluster centers over
    time with Kalman filters, and save one annotated range-angle image per
    timestep (the bounding boxes are encoded in the file name).
    """
    action = 'save'
    rawpath = f'dataset/{args.pathin}/proc'
    savepath = f'dataset/{args.pathout}/final' if args.pathout else f'dataset/{args.pathin}/final'
    print(f'[LOG] Truth | Starting: {args.pathin}')
    # Create the subsequent save folders
    # if os.path.isdir(savepath):
    #     shutil.rmtree(savepath)
    if not os.path.isdir(savepath):
        os.makedirs(savepath)

    for i, fname in enumerate(os.listdir(rawpath + '/denoised')):
        frawname = args.saveprefix if args.saveprefix else args.pathin
        frawname = f'{frawname}_ra_{fname.split("_")[2]}{fname.split("_")[4].split(".")[0]}'
        logprefix = f'[LOG] Truth | {i+1} / {len(os.listdir(rawpath + "/denoised"))}'
        print(f'{logprefix} {frawname}', end='\r')

        # starting index in the loaded data (first frames are discarded)
        start = 10
        # load RDA data, MUST have 4D shape: (N_range_bins, N_angle_bins, N_doppler_bins, N_timesteps)
        rda_data = np.load(f'{rawpath}/denoised/{fname}')[..., start:]
        raw_ra_seq = np.load(f'{rawpath}/raw/{fname}')[..., start:]

        # initialize clustering/tracker parameters
        MAX_AGE = 10          # max consecutive misses before a track is hidden
        MIN_DET_NUMBER = 15   # min hits before a track is considered valid
        MIN_PTS_THR = 30      # min points for a DBSCAN cluster to be kept
        MIN_SAMPLES = 40      # DBSCAN min_samples
        EPS = 0.04            # DBSCAN eps (on normalized coordinates)
        thr = 20              # power threshold on the RDA map
        # assoc_score = 'Mahalanobis'  # either 'IOU' or 'Mahalanobis'
        # CLASS_CONF_THR = 0.0

        # init radar parameters (must match dataprep/processing.py)
        c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12)
        f_start = 76e9
        f_stop = 78e9
        Tp = 250e-6
        kf = 1.1106e13
        fs = 2.8571e6
        fc = (f_start + f_stop)/2

        # compute range angle doppler intervals
        NFFT = 2**10
        v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf)
        r_min = 0.5
        r_max = 10
        arg_rmin = np.argmin(np.abs(v_range - r_min))
        arg_rmax = np.argmin(np.abs(v_range - r_max))
        vrange_ext = v_range[arg_rmin:arg_rmax+1]
        NFFT_ant = 64
        vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180
        NFFT_vel = 256
        vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp)
        v_vel = vfreq_vel*c0/(2*fc)

        track_id_list = list(range(1000))  # list with possible track id numbers
        tracking_list = []                 # currently alive KalmanTracker objects

        # loop over the time-steps
        for timestep in range(rda_data.shape[-1]):
            print(f'{logprefix} {frawname} Timestep: {timestep} \t\t\t', end='\r')
            # select RDA map of the current time-step, restricted to the range window
            data = rda_data[..., timestep]
            data = data[arg_rmin:arg_rmax + 1]

            # compute normalized maps for DBSCAN (all axes scaled to [0, 1])
            norm_ang = (vang_deg - np.min(vang_deg)) / (np.max(vang_deg) - np.min(vang_deg))
            norm_vel = (v_vel - np.min(v_vel)) / (np.max(v_vel) - np.min(v_vel))
            norm_ran = (vrange_ext - np.min(vrange_ext)) / (np.max(vrange_ext) - np.min(vrange_ext))
            rav_pts = np.asarray(np.meshgrid(vrange_ext, vang_deg, v_vel, indexing='ij'))
            norm_rav_pts = np.asarray(np.meshgrid(norm_ran, norm_ang, norm_vel, indexing='ij'))

            # select values which are over the power threshold
            raw_ra = raw_ra_seq[arg_rmin:arg_rmax + 1, :, timestep]
            full_indices = (data > thr)
            data[data < thr] = 0
            rav_pts = rav_pts[:, full_indices]
            power_values_full = data[full_indices]
            norm_rav_pts = norm_rav_pts[:, full_indices]
            rav_pts_lin = rav_pts.reshape(rav_pts.shape[0], -1)
            # save range and angle for tracking (angle converted to shifted radians)
            ra_totrack = np.copy(rav_pts_lin[:2, :])
            ra_totrack[1] = deg2rad_shift(ra_totrack[1])
            normrav_pts_lin = norm_rav_pts.reshape(norm_rav_pts.shape[0], -1)

            if rav_pts.shape[1] > MIN_SAMPLES:
                # apply DBSCAN on normalized RDA map
                labels = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit_predict(normrav_pts_lin.T)
                unique, counts = np.unique(labels, return_counts=True)
                if not len(unique):
                    print('[WAR] Truth | DBSCAN found no clusters! Skipping frame.')
                    continue
            else:
                print('[WAR] Truth | No points to cluster! Skipping frame.')
                continue

            # loop over the detected clusters
            detected_clusters = []  # list containing all the detected clusters
            for cluster_id in unique:
                if cluster_id == -1:
                    # -1 is the label for noise in DBSCAN, skip it
                    continue
                number = counts[unique == cluster_id]
                if number < MIN_PTS_THR:
                    continue
                # initialize new cluster object and fill its fields
                new_cluster = Cluster(cluster_id)
                new_cluster.cardinality = number
                new_cluster.elements = ra_totrack[:, labels == cluster_id]  # range and angle
                new_cluster.dopplers = rav_pts_lin[2, labels == cluster_id]
                w = np.squeeze(power_values_full[labels == cluster_id])
                weights = w/np.sum(w)  # normalized powers
                # power-weighted centroid in polar and Cartesian coordinates
                new_cluster.center_polar = np.average(new_cluster.elements, weights=weights, axis=1).reshape(2, 1)
                new_cluster.center_cartesian = np.array([new_cluster.center_polar[0]*np.cos(new_cluster.center_polar[1]),
                                                         new_cluster.center_polar[0]*np.sin(new_cluster.center_polar[1])],
                                                        dtype=np.float64).reshape(-1, 1)
                new_cluster.box = get_box(new_cluster)
                detected_clusters.append(new_cluster)

            if not timestep:
                # happens only in the first time-step: spawn one track per cluster
                for cl in detected_clusters:
                    tracking_list.append(KalmanTracker(id_=track_id_list.pop(0),
                                                       s0=np.array([cl.center_cartesian[0], 0,
                                                                    cl.center_cartesian[1], 0],
                                                                   dtype=np.float64).reshape(-1, 1)))
                    tracking_list[-1].box = cl.box
                sel_tracking_list = np.copy(tracking_list)
            elif timestep:
                # happens in all other time-steps
                # prepare the data association building the cost matrix
                detected_clusters_centers = [x.center_cartesian for x in detected_clusters]
                detected_centers = detected_clusters_centers
                prev_cartcenters = []
                prev_centers = []
                if len(tracking_list) > 0:
                    for trk in tracking_list:
                        prev_cartcenters.append(trk.xy)
                        prev_centers.append(trk.rtheta)
                    cost_matrix = np.zeros((len(detected_centers), len(prev_cartcenters)))
                    # NOTE(review): `i`/`j` here shadow the outer loop indices;
                    # harmless because the outer values are re-bound each iteration.
                    for i in range(len(detected_centers)):
                        for j in range(len(prev_cartcenters)):
                            # cost is the Mahalanobis distance
                            cost_matrix[i, j] = KalmanTracker.get_mahalanobis_distance(
                                detected_centers[i] - prev_cartcenters[j], tracking_list[j].get_S())
                    cost_matrix = np.asarray(cost_matrix)
                    # hungarian algorithm for track association
                    matches, undet, _ = KalmanTracker.hungarian_assignment(cost_matrix)
                    # handle matched tracks
                    if len(matches) > 0:
                        for detec_idx, track_idx in matches:
                            # get observation, polar coords center of the detected cluster
                            obs = detected_clusters[detec_idx].center_polar
                            # get tracker object of the detection
                            current_tracker = tracking_list[track_idx]
                            # KF predict-update step
                            current_tracker.predict()
                            current_tracker.update(obs.reshape(2, 1))
                            current_tracker.box = get_box(detected_clusters[detec_idx])
                            current_tracker.hits += 1
                            current_tracker.misses_number = 0
                            # imaging(current_tracker, detected_clusters[detec_idx], data, labels, full_indices.ravel())
                    else:
                        print('[WAR] Truth | No detections-tracks matches found! Skipping frame.')
                        continue
                    # deal with undetected tracks: predict only, grow the miss counter
                    if len(undet) > 0:
                        for track_idx in undet:
                            old_tracker = tracking_list[track_idx]
                            old_tracker.misses_number += 1
                            # predict only as no obs is detected
                            old_tracker.predict()
                            # NOTE(review): both h and w are taken from box[0]
                            # (the range center) — possibly intended to be
                            # box[2]/box[3]; confirm before changing.
                            old_tracker.box = get_box(None, c=old_tracker.xy,
                                                      h=old_tracker.box[0], w=old_tracker.box[0])
                # filter out tracks outside room borders (ghost targets)
                tracking_list = [t for t in tracking_list
                                 if (t.xy[0] > -1.70) and (t.xy[0] < 2.30)]  # kill tracks outside the room boundaries
                # select the valid tracks, i.e., the ones with less than the max. misses and enough hits
                sel_tracking_list = [t for t in tracking_list
                                     if (t.misses_number <= MAX_AGE) and (t.hits >= MIN_DET_NUMBER)]

            # save the annotated range-angle image for this timestep
            plot4train(f'{savepath}/{frawname}{int(4-len(str(timestep)))*"0"}{timestep}',
                       data, raw_ra, sel_tracking_list, vrange_ext, vang_deg, args.reso, action)

    print(f'[LOG] Truth | Truth data ready: {savepath}')


def imaging(tracker, cluster, data, labels, full_indices):
    """Debug visualization: show RD/RA maps of a single cluster plus its track center."""
    flat_data = np.copy(data.ravel())
    full_data = flat_data[full_indices]
    full_data[labels != cluster.label] = 0
    flat_data[full_indices] = full_data
    flat_data = flat_data.reshape(data.shape)
    ra = flat_data.max(2)
    rd = flat_data.max(1)
    plt.subplot(121)
    plt.imshow(rd, aspect='auto')
    plt.subplot(122)
    plt.imshow(ra, aspect='auto', extent=(np.pi, 0.25065, 0.5, 10))
    plt.scatter(tracker.rtheta[1], tracker.rtheta[0], marker='x', c='r')
    plt.colorbar()
    plt.show()
    plt.close()


def plot(path, data_points, ra, noisy_ramap, t_list, action, index, ranges, angles):
    """Side-by-side debug plot: point cloud and RA image with track bounding boxes."""
    boxes = np.array([kt.box for kt in t_list])
    angles = deg2rad_shift(angles)
    # ramap = data_points.mean(2)
    _, ax = plt.subplots(1, 2)
    ax[0].set_title('Point-cloud representation')
    ax[1].set_title('RA map image representation')
    ax[0].scatter(ra[1], ra[0], marker='.')  # , c=labels)
    ax[1].imshow(noisy_ramap, aspect='auto')
    ax[0].set_xlabel(r'$\theta$ [rad]')
    ax[0].set_ylabel(r'$R$ [m]')
    ax[0].set_xlim([0.25065, np.pi])
    ax[0].set_ylim([0.5, 10])
    ax[0].grid()
    for i in range(len(boxes)):
        # add real valued bb on point cloud plot
        add_bb(boxes[i], ax[0], t_list[i].id)
        # add pixel-level bb to ra image
        int_box = adjust_bb(boxes[i], ranges, angles)
        add_bb(int_box, ax[1], t_list[i].id)
    if action == 'save':
        plt.savefig(path + f'fig_{index}', format='png', dpi=300)
        plt.close()
    elif action == 'plot':
        plt.title(f'Frame {index}')
        plt.show()
        plt.close()


def plot4train(path, data_points, noisy_ramap, t_list, ranges, angles, reso=416, action='save'):
    """Save a borderless reso x reso RA image; boxes are encoded in the file name."""
    boxes = np.array([kt.box for kt in t_list])
    angles = deg2rad_shift(angles)
    fig = plt.figure(figsize=(1, 1), dpi=reso, frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')
    ax.imshow(noisy_ramap, aspect='auto')
    w_scale = reso/len(angles)
    h_scale = reso/len(ranges)
    bbs = []
    for i in range(0, min(4, len(boxes))):
        # # add pixel-level bb to ra image
        bb = adjust_bb(boxes[i], ranges, angles, w_scale, h_scale)
        bbs.append(list(map(int, [bb[1][0], bb[0][0], bb[3][0], bb[2][0]])))
        # add_bb(bb, ax, t_list[i].id)
    if bbs and action == 'save':
        # image is only saved when at least one valid box exists
        plt.savefig(f'{path}_{bbs}.png'.replace(' ', ''), format='png', dpi=reso)
    elif action == 'plot':
        plt.show()
    plt.close()


def add_bb(bb, ax, note):
    """Draw box bb = [r_center, a_center, height, width] on ax.

    NOTE(review): the `note` parameter (track id) is currently unused.
    """
    ax.add_patch(patches.Rectangle((bb[1] - bb[3]/2, bb[0] - bb[2]/2),  # top left corner coordinates
                                   bb[3],  # width
                                   bb[2],  # height
                                   linewidth=1,
                                   edgecolor='r',
                                   facecolor='none'))


def adjust_bb(bb_real, r, a, w_scale=1, h_scale=1):
    '''
    this function is needed to map the bb obtained in real values to the image pixel coordinates
    without the bias introduced by non-uniform spacing of angle bins
    '''
    bb_ind = np.zeros(bb_real.shape[0])
    bb_ind[0] = np.argmin(np.abs(r - bb_real[0])) * h_scale
    bb_ind[1] = np.argmin(np.abs(a - bb_real[1])) * w_scale
    top = np.argmin(np.abs(r - (bb_real[0] - bb_real[2]/2)))
    bottom = np.argmin(np.abs(r - (bb_real[0] + bb_real[2]/2)))
    left = np.argmin(np.abs(a - (bb_real[1] + bb_real[3]/2)))
    right = np.argmin(np.abs(a - (bb_real[1] - bb_real[3]/2)))
    bb_ind[2] = np.abs(top - bottom) * h_scale
    bb_ind[3] = np.abs(left - right) * w_scale
    return bb_ind.reshape(-1, 1)
/dataprep/util.py
import os
import shutil
from dataclasses import dataclass, field
from typing import List

import h5py
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


@dataclass
class Cluster:
    # cluster object, contains detected cluster points and additional values
    label: int                       # DBSCAN cluster label
    cardinality: int = 0             # number of points in the cluster
    elements: List = field(default_factory=list)   # (range, angle) points
    dopplers: List = field(default_factory=list)   # Doppler values of the points
    # BUG FIX: the ndarray defaults were created once at class definition and
    # shared by every Cluster instance (mutating one mutated them all); a
    # default_factory builds a fresh array per instance.
    center_polar: np.ndarray = field(default_factory=lambda: np.empty((2, 1)))
    center_cartesian: np.ndarray = field(default_factory=lambda: np.empty((2, 1)))
    box: np.ndarray = field(default_factory=lambda: np.empty((4, 1)))


def polar2cartesian(xp):
    """Convert (r, theta) to a (2, 1) Cartesian vector; angles in rad."""
    return np.array([xp[0]*np.cos(xp[1]),
                     xp[0]*np.sin(xp[1])], dtype=np.float64).reshape(-1, 1)


def cartesian2polar(xy):
    """Convert (x, y) to a (2, 1) polar vector (r, theta); angles in rad."""
    return np.array([np.sqrt(xy[0]**2 + xy[1]**2),
                     np.arctan2(xy[1], xy[0])]).reshape(-1, 1)


def deg2rad_shift(angles):
    """Degrees -> radians, then mirror-shift so 90 deg maps to 0 rad."""
    a = np.copy(angles)
    a = np.pi*a/180
    a = -a + np.pi/2
    return a


def shift_rad2deg(angles):
    """Inverse of deg2rad_shift: shifted radians back to degrees."""
    a = np.copy(angles)
    a = -a + np.pi/2
    a = 180*a/np.pi
    return a


def get_box(cluster, c=None, h=0.5, w=0.3):
    """Return a (4, 1) box [r_center, a_center, r_extent, a_extent].

    If `cluster` is given, the box is its polar center plus the min-max
    extent of its points; otherwise a default h x w box centered at `c`.
    """
    if cluster is not None:
        r_ext = cluster.elements[0].max() - cluster.elements[0].min()
        a_ext = cluster.elements[1].max() - cluster.elements[1].min()
        out = np.array([cluster.center_polar[0].squeeze(),
                        cluster.center_polar[1].squeeze(),
                        r_ext,
                        a_ext]).reshape(4, 1)
        return out
    else:
        return np.array([c[0], c[1], h, w]).reshape(4, 1)


def IOU_score(a, b):
    """Return the intersection-over-union of boxes a, b given as [x1, y1, x2, y2]."""
    x1 = max(a[0], b[0])
    y1 = max(a[1], b[1])
    x2 = min(a[2], b[2])
    y2 = min(a[3], b[3])
    width = x2 - x1
    height = y2 - y1
    if (width < 0) or (height < 0):
        # the boxes do not overlap at all
        return 0.0
    area_intersection = width*height
    area_a = (a[2] - a[0])*(a[3] - a[1])
    area_b = (b[2] - b[0])*(b[3] - b[1])
    area_union = area_a + area_b - area_intersection
    return area_intersection/area_union
/test.py
# Scratch/sanity-check script: prints a list of dicts, then the index of each
# element while iterating, then the list again (exercises enumerate unpacking).
# NOTE(review): torch, numpy and os are imported but unused here.
import torch
import numpy as np
import os

l = [{'test': 0, 'test2': 1}, {'test': 3, 'test2': 4}]
print(l)
for i, j in enumerate(l):
    print(i)
print(l)
/yolo/__init__.py
import gc

from .train import train
from .predict import predict


def main(args):
    """Entry point of the yolo package: dispatch on args.Action.

    Runs training when args.Action == 'train', prediction when it is
    'predict', and does nothing otherwise; garbage collection is forced
    before and after the run.
    """
    gc.collect()
    handlers = {'train': train, 'predict': predict}
    handler = handlers.get(args.Action)
    if handler is not None:
        handler()
    gc.collect()
/yolo/darknet.py
from __future__ import division
import torch, torch.nn as nn, torch.nn.functional as F
# from torch.autograd import Variable
import numpy as np
# import cv2
# from pprint import pprint
from .util import *


# =================================================================
# MAXPOOL (with stride = 1, NOT SURE IF NEEDED)
class MaxPool1s(nn.Module):
    """Max pooling that keeps the spatial size by replicate-padding the input."""

    def __init__(self, kernel_size):
        super(MaxPool1s, self).__init__()
        self.kernel_size = kernel_size
        self.pad = kernel_size - 1

    def forward(self, x):
        padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode="replicate")
        pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x)
        return pooled_x


# EMPTY LAYER
class EmptyLayer(nn.Module):
    """Placeholder module for 'route' and 'shortcut' blocks (logic lives in DarkNet.forward)."""

    def __init__(self):
        super(EmptyLayer, self).__init__()


# YOLO / PREDICTION LAYER
class YOLOLayer(nn.Module):
    """Detection head: decodes raw feature maps into boxes (eval) or a loss dict (train).

    NOTE(review): all tensors are moved with .cuda() unconditionally — this
    layer requires a CUDA device.
    """

    def __init__(self, anchors, num_classes, reso, ignore_thresh):
        super(YOLOLayer, self).__init__()
        self.anchors = anchors
        self.num_classes = num_classes
        self.reso = reso
        self.ignore_thresh = ignore_thresh

    def forward(self, x, y_true=None):
        bs, _, gs, _ = x.size()
        stride = self.reso // gs  # no pooling used, stride is the only downsample
        num_attrs = 5 + self.num_classes  # tx, ty, tw, th, p0
        nA = len(self.anchors)
        # anchors expressed in grid-cell units
        scaled_anchors = torch.Tensor(
            [(a_w / stride, a_h / stride) for a_w, a_h in self.anchors]).cuda()

        # Re-organize [bs, (5+nC)*nA, gs, gs] => [bs, nA, gs, gs, 5+nC]
        x = x.view(bs, nA, num_attrs, gs, gs).permute(
            0, 1, 3, 4, 2).contiguous()

        pred = torch.Tensor(bs, nA, gs, gs, num_attrs).cuda()

        pred_tx = torch.sigmoid(x[..., 0]).cuda()
        pred_ty = torch.sigmoid(x[..., 1]).cuda()
        pred_tw = x[..., 2].cuda()
        pred_th = x[..., 3].cuda()
        pred_conf = torch.sigmoid(x[..., 4]).cuda()
        if self.training == True:
            pred_cls = x[..., 5:].cuda()  # softmax in cross entropy
        else:
            pred_cls = F.softmax(x[..., 5:], dim=-1).cuda()  # class

        # per-cell offsets used to decode tx/ty into grid coordinates
        grid_x = torch.arange(gs).repeat(gs, 1).view(
            [1, 1, gs, gs]).float().cuda()
        grid_y = torch.arange(gs).repeat(gs, 1).t().view(
            [1, 1, gs, gs]).float().cuda()
        anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))
        anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))

        # decode predictions: cell offset + sigmoid(tx/ty), anchors * exp(tw/th)
        pred[..., 0] = pred_tx + grid_x
        pred[..., 1] = pred_ty + grid_y
        pred[..., 2] = torch.exp(pred_tw) * anchor_w
        pred[..., 3] = torch.exp(pred_th) * anchor_h
        pred[..., 4] = pred_conf
        pred[..., 5:] = pred_cls

        if not self.training:
            pred[..., :4] *= stride  # back to input-image pixel units
            return pred.view(bs, -1, num_attrs)
        else:
            loss = YOLOLoss([bs, nA, gs], scaled_anchors, self.num_classes,
                            pred, [pred_tx, pred_ty, pred_tw, pred_th])
            loss = loss(x, y_true.float())
            return loss


# YOLOv3 Loss
class YOLOLoss(nn.Module):
    """Builds the YOLOv3 training targets and computes the loss terms."""

    def __init__(self, shape, scaled_anchors, num_classes, pred, pred_t):
        super(YOLOLoss, self).__init__()
        self.bs = shape[0]
        self.nA = shape[1]
        self.gs = shape[2]
        self.scaled_anchors = scaled_anchors
        self.num_classes = num_classes
        self.predictions = pred
        self.pred_conf = pred[..., 4]
        self.pred_cls = pred[..., 5:]
        self.pred_tx = pred_t[0]
        self.pred_ty = pred_t[1]
        self.pred_tw = pred_t[2]
        self.pred_th = pred_t[3]

    def forward(self, x, y_true):
        """Compute the loss dict; y_true rows are [xc, yc, w, h, class] in [0, 1] units."""
        # ground-truth target tensors, one cell per assigned anchor
        gt_tx = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_ty = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_tw = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_th = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_conf = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_cls = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        obj_mask = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()

        for idx in range(self.bs):
            for y_true_one in y_true[idx]:
                y_true_one = y_true_one.cuda()
                gt_bbox = y_true_one[:4] * self.gs  # scale to grid units
                gt_cls_label = int(y_true_one[4])

                gt_xc, gt_yc, gt_w, gt_h = gt_bbox[0:4]
                gt_i = gt_xc.long().cuda()  # grid cell containing the gt center
                gt_j = gt_yc.long().cuda()

                # pick the anchor whose prediction best overlaps the gt box
                pred_bbox = self.predictions[idx, :, gt_j, gt_i, :4]
                ious = IoU(xywh2xyxy(pred_bbox), xywh2xyxy(gt_bbox))
                best_iou, best_a = torch.max(ious, 0)

                w, h = self.scaled_anchors[best_a]
                gt_tw[idx, best_a, gt_j, gt_i] = torch.log(gt_w / w)
                gt_th[idx, best_a, gt_j, gt_i] = torch.log(gt_h / h)
                gt_tx[idx, best_a, gt_j, gt_i] = gt_xc - gt_i.float()
                gt_ty[idx, best_a, gt_j, gt_i] = gt_yc - gt_j.float()

                gt_conf[idx, best_a, gt_j, gt_i] = best_iou
                gt_cls[idx, best_a, gt_j, gt_i] = gt_cls_label
                obj_mask[idx, best_a, gt_j, gt_i] = 1

        MSELoss = nn.MSELoss(reduction='sum')
        BCELoss = nn.BCELoss(reduction='sum')
        CELoss = nn.CrossEntropyLoss(reduction='sum')

        loss = dict()
        # Xc, Yc, W, H loss calculation (only where an object is assigned)
        loss['x'] = MSELoss(self.pred_tx * obj_mask, gt_tx * obj_mask)
        loss['y'] = MSELoss(self.pred_ty * obj_mask, gt_ty * obj_mask)
        loss['w'] = MSELoss(self.pred_tw * obj_mask, gt_tw * obj_mask)
        loss['h'] = MSELoss(self.pred_th * obj_mask, gt_th * obj_mask)
        # CLASS loss calculation
        # loss['cls'] = BCELoss(pred_cls * obj_mask, cls_mask * obj_mask)
        loss['cls'] = CELoss((self.pred_cls * obj_mask.unsqueeze(-1)).view(-1, self.num_classes),
                             (gt_cls * obj_mask).view(-1).long())
        # OBJECTIVENESS loss calculation
        # loss['conf'] = MSELoss(self.pred_conf * obj_mask * 5, gt_conf * obj_mask * 5) + \
        #     MSELoss(self.pred_conf * (1 - obj_mask), gt_conf * (1 - obj_mask))
        lambda_noobj = 0.5  # down-weight of the no-object confidence term
        loss['conf'] = BCELoss(self.pred_conf * obj_mask, (gt_conf * obj_mask).detach()) + \
            lambda_noobj * BCELoss(self.pred_conf * (1 - obj_mask), (gt_conf * (1 - obj_mask)).detach())
        # pprint(loss)
        return loss


# Non-Max Suppression
class NMSLayer(nn.Module):
    """
    NMS layer which performs Non-maximum Suppression
    1. Filter background
    2. Get prediction with particular class
    3. Sort by confidence
    4. Suppress non-max prediction
    """

    def __init__(self, conf_thresh=0.65, nms_thresh=0.55):
        """
        Args:
        - conf_thresh: (float) fore-ground confidence threshold
        - nms_thresh: (float) nms threshold
        """
        super(NMSLayer, self).__init__()
        self.conf_thresh = conf_thresh
        self.nms_thresh = nms_thresh

    def forward(self, x):
        """
        Args
            x: (Tensor) prediction feature map, with size [bs, num_bboxes, 5 + nC]
        Returns
            predictions: (Tensor) prediction result with size
                [num_bboxes, [image_batch_idx, 4 offsets, p_obj, max_conf, cls_idx]]
        """
        bs, _, _ = x.size()
        predictions = torch.Tensor().cuda()

        for idx in range(bs):
            pred = x[idx]

            try:
                # keep foreground boxes only, convert to corner coordinates,
                # and attach (max class score, class index) columns
                non_zero_pred = pred[pred[:, 4] > self.conf_thresh]
                non_zero_pred[:, :4] = xywh2xyxy(non_zero_pred[:, :4])
                max_score, max_idx = torch.max(non_zero_pred[:, 5:], 1)
                max_idx = max_idx.float().unsqueeze(1)
                max_score = max_score.float().unsqueeze(1)
                non_zero_pred = torch.cat(
                    (non_zero_pred[:, :5], max_score, max_idx), 1)
                classes = torch.unique(non_zero_pred[:, -1])
            except Exception:  # no object predicted
                print('No object predicted')
                continue

            for cls in classes:
                # greedy per-class NMS: keep highest-confidence box, drop overlaps
                cls_pred = non_zero_pred[non_zero_pred[:, -1] == cls]
                conf_sort_idx = torch.sort(cls_pred[:, 5], descending=True)[1]
                cls_pred = cls_pred[conf_sort_idx]
                max_preds = []
                while cls_pred.size(0) > 0:
                    max_preds.append(cls_pred[0].unsqueeze(0))
                    ious = IoU(max_preds[-1], cls_pred)
                    cls_pred = cls_pred[ious < self.nms_thresh]

                if len(max_preds) > 0:
                    max_preds = torch.cat(max_preds).data
                    batch_idx = max_preds.new(max_preds.size(0), 1).fill_(idx)
                    seq = (batch_idx, max_preds)
                    predictions = torch.cat(seq, 1) if predictions.size(
                        0) == 0 else torch.cat((predictions, torch.cat(seq, 1)))

        return predictions


# =================================================================
# NETWORK
class DarkNet(nn.Module):
    """YOLOv3 backbone + heads assembled from a darknet .cfg description."""

    def __init__(self, cfg, reso=416, thr_obj=0.5, thr_nms=0.5):
        super(DarkNet, self).__init__()
        self.blocks = parse_cfg(cfg)
        self.reso, self.thr_obj, self.thr_nms = reso, thr_obj, thr_nms
        self.net_info, self.module_list = self.create_modules(self.blocks)
        self.nms = NMSLayer(self.thr_obj, self.thr_nms)

    def forward(self, x, y_true=None, CUDA=False):
        """Run the network; returns a loss dict in training mode, NMS'd detections otherwise."""
        modules = self.blocks[1:]
        predictions = torch.Tensor().cuda() if CUDA else torch.Tensor()
        outputs = dict()  # We cache the outputs for the route layer
        loss = dict()

        for i, module in enumerate(modules):
            if module["type"] == "convolutional" or module["type"] == "upsample":
                x = self.module_list[i](x)
                outputs[i] = x

            elif module["type"] == "shortcut":
                # residual connection: add the output of an earlier layer
                from_ = int(module["from"])
                x = outputs[i-1] + outputs[i+from_]
                outputs[i] = x

            elif module["type"] == "route":
                # concatenate (or forward) earlier feature maps
                layers = module["layers"]
                layers = [int(a) for a in layers]
                if (layers[0]) > 0:
                    layers[0] = layers[0] - i  # absolute index -> relative offset
                if len(layers) == 1:
                    x = outputs[i + (layers[0])]
                else:
                    if (layers[1]) > 0:
                        layers[1] = layers[1] - i
                    map1 = outputs[i + layers[0]]
                    map2 = outputs[i + layers[1]]
                    x = torch.cat((map1, map2), 1)
                outputs[i] = x

            elif module["type"] == 'yolo':
                if self.training == True:
                    # accumulate each loss term (and a running 'total') across heads
                    loss_part = self.module_list[i][0](x, y_true)
                    for key, value in loss_part.items():
                        value = value
                        loss[key] = loss[key] + \
                            value if key in loss.keys() else value
                        loss['total'] = loss['total'] + \
                            value if 'total' in loss.keys() else value
                else:
                    x = self.module_list[i][0](x)
                    predictions = x if len(predictions.size()) == 1 else torch.cat(
                        (predictions, x), 1)
                outputs[i] = outputs[i-1]  # skip

        # return prediction result only when evaluated
        if self.training == True:
            return loss
        else:
            predictions = self.nms(predictions)
            return predictions

    def create_modules(self, blocks):
        """Translate parsed cfg blocks into an nn.ModuleList, tracking channel counts."""
        net_info = blocks[0]  # Captures the information about the input and pre-processing
        module_list = nn.ModuleList()
        in_channels = 3
        out_channels_list = []

        for index, block in enumerate(blocks[1:]):
            module = nn.Sequential()

            # Convolutional Layer
            if (block["type"] == "convolutional"):
                activation = block["activation"]
                try:
                    batch_normalize = int(block["batch_normalize"])
                    bias = False  # BN layers make the conv bias redundant
                except:
                    batch_normalize = 0
                    bias = True
                out_channels = int(block["filters"])
                kernel_size = int(block["size"])
                padding = (kernel_size - 1) // 2 if int(block["pad"]) else 0
                stride = int(block["stride"])
                conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias = bias)
                module.add_module("conv_{0}".format(index), conv)
                if batch_normalize:
                    bn = nn.BatchNorm2d(out_channels)
                    module.add_module("batch_norm_{0}".format(index), bn)
                if activation == "leaky":
                    activn = nn.LeakyReLU(0.1, inplace = True)
                    module.add_module("leaky_{0}".format(index), activn)

            # Up Sample Layer
            elif (block["type"] == "upsample"):
                stride = int(block["stride"])  # = 2 in Yolov3
                upsample = nn.Upsample(scale_factor = stride, mode = "nearest")
                module.add_module("upsample_{}".format(index), upsample)

            # Shortcut Layer
            elif block["type"] == "shortcut":
                shortcut = EmptyLayer()
                module.add_module("shortcut_{}".format(index), shortcut)

            # Route Layer
            elif (block["type"] == "route"):
                route = EmptyLayer()
                module.add_module("route_{0}".format(index), route)
                block["layers"] = block["layers"].split(',')
                start = int(block["layers"][0])
                if len(block['layers']) == 1:
                    start = int(block['layers'][0])
                    out_channels = out_channels_list[index + start]
                elif len(block['layers']) == 2:
                    start = int(block['layers'][0])
                    end = int(block['layers'][1])
                    out_channels = out_channels_list[index + start] + out_channels_list[end]

            # Yolo Layer
            elif block["type"] == "yolo":
                mask = block["mask"].split(",")
                mask = [int(x) for x in mask]
                anchors = block["anchors"].split(",")
                anchors = [int(a) for a in anchors]
                anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors), 2)]
                anchors = [anchors[i] for i in mask]  # keep anchors selected by the mask
                num_classes = int(block['classes'])
                ignore_thresh = float(block['ignore_thresh'])
                prediction = YOLOLayer(anchors, num_classes, self.reso, ignore_thresh)
                module.add_module("prediction_{}".format(index), prediction)

            module_list.append(module)
            in_channels = out_channels
            out_channels_list.append(out_channels)

        return (net_info, module_list)
def load_weights(self, path, cutoff=None):
    """Load darknet-format weights from disk into the conv layers.

    YOLOv3 is fully convolutional, so only convolutional layers carry
    weights.  Per conv block the .weights file stores, in order:
      1. (optional) bn_biases => bn_weights => bn_running_mean => bn_running_var
      2. (optional) conv_bias (only when there is no batch norm)
      3. conv_weights

    Args
    - path: (str) path to the .weights file
    - cutoff: (optional, int) stop loading before module index `cutoff`
    """
    # `with` guarantees the handle is closed even if parsing fails.
    with open(path, 'rb') as fp:
        header = np.fromfile(fp, dtype=np.int32, count=5)  # version info + images-seen counter
        weights = np.fromfile(fp, dtype=np.float32)
    header = torch.from_numpy(header)
    ptr = 0
    for i, module in enumerate(self.module_list):
        # BUGFIX: module_list[i] was built from blocks[i+1] (blocks[0] is the
        # [net] section -- see create_modules/forward), so indexing with
        # self.blocks[i] was off by one.
        block = self.blocks[i + 1]
        if cutoff is not None and i == cutoff:
            print("Stop before", block['type'], "block (No.%d)" % (i + 1))
            break
        if block['type'] == "convolutional":
            batch_normalize = int(block['batch_normalize']) if 'batch_normalize' in block else 0
            conv = module[0]
            if batch_normalize > 0:
                bn = module[1]
                # All four BN tensors share the same element count.
                num_bn_biases = bn.bias.numel()
                for target in (bn.bias.data, bn.weight.data, bn.running_mean, bn.running_var):
                    chunk = torch.from_numpy(weights[ptr:ptr + num_bn_biases])
                    target.copy_(chunk.view_as(target))
                    ptr += num_bn_biases
            else:
                num_biases = conv.bias.numel()
                conv_biases = torch.from_numpy(weights[ptr:ptr + num_biases])
                conv.bias.data.copy_(conv_biases.view_as(conv.bias.data))
                ptr = ptr + num_biases
            num_weights = conv.weight.numel()
            conv_weights = torch.from_numpy(weights[ptr:ptr + num_weights])
            conv.weight.data.copy_(conv_weights.view_as(conv.weight.data))
            ptr = ptr + num_weights
/yolo/dataset.py
import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
import os
import numpy as np
from PIL import Image


class MmwaveDataset(torch.utils.data.Dataset):
    """mmWave image dataset whose bounding boxes are encoded in the file names."""

    def __init__(self, data_dir, data_size=0, transforms=None):
        files = sorted(os.listdir(data_dir))
        self.files = [f"{data_dir}/{x}" for x in files]
        # BUGFIX: the original used `assert("message")`, which asserts a truthy
        # string and therefore never fails; raise explicitly instead.
        if data_size < 0 or data_size > len(files):
            raise ValueError("Data size should be between 0 to number of files in the dataset")
        if data_size == 0:
            data_size = len(files)
        self.data_size = data_size
        self.transforms = transforms

    def __len__(self):
        return self.data_size

    def __getitem__(self, idx):
        """Return (image_path, image, labels); labels are [xc, yc, w, h, cls] normalized to [0, 1]."""
        image_path = self.files[idx]
        image = Image.open(image_path)
        img_w, img_h = image.size
        image = self.preProcessImage(image)
        labels = []
        labels_str = image_path.split("_")[-1]
        if "[[" in labels_str:
            # Multiple boxes encoded as "...[[x,y,w,h],[x,y,w,h]]..."
            labels_str = labels_str.split('[[')[1].split(']]')[0].split('],[')
            labels = np.zeros((4, 5))  # NOTE(review): hard cap of 4 boxes -- confirm upstream
            for i, l in enumerate(labels_str):
                label = np.zeros(5)
                label[:4] = np.array([int(a) for a in l.split(',')])  # [xc, yc, w, h]
                label[0] /= img_w  # Xcenter
                label[1] /= img_h  # Ycenter
                label[2] /= img_w  # Width
                label[3] /= img_h  # Height
                labels[i, :] = label
        else:
            # Single box encoded as "...[x,y,w,h]..."
            labels_str = labels_str.split('[')[1].split(']')[0].split(',')
            labels = np.zeros((1, 5))
            labels[0, :4] = np.array([int(a) for a in labels_str])  # [xc, yc, w, h]
            if np.any(labels[0, :4] == 0):
                return image, None  # sentinel sample; filtered out by collate()
            labels[0, 0] /= img_w  # Xcenter
            labels[0, 1] /= img_h  # Ycenter
            labels[0, 2] /= img_w  # Width
            labels[0, 3] /= img_h  # Height
        return image_path, image, labels

    def preProcessImage(self, image):
        """Convert to RGB and either apply user transforms or return a CHW float32 array."""
        image = image.convert('RGB')
        if self.transforms:
            return self.transforms(image)
        image = np.array(image)
        image = image.transpose(2, 1, 0)
        return image.astype(np.float32)


def collate(batch):
    """Drop samples whose second element is None, then apply the default collate."""
    batch = list(filter(lambda x: x[1] is not None, batch))
    return default_collate(batch)


def getDataLoaders(data_dir, transforms, train_split=0, batch_size=8,
                   num_workers=2, collate_fn=collate, random_seed=0):
    """Build DataLoaders; returns (None, loader) when no split is requested.

    Args
    - train_split: (float) 0 or 1 means a single set, otherwise train fraction
    - random_seed: (int) nonzero enables shuffling and seeds the split
    """
    if train_split < 0 or train_split > 1:
        raise Exception(f"data_loader | Split ({train_split}) coefficient should be 0 < x < 1")
    dataset = MmwaveDataset(data_dir=data_dir, transforms=transforms)
    shuffle = True if random_seed != 0 else False
    # Single set
    if train_split == 0 or train_split == 1:
        return None, torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                                 shuffle=shuffle, num_workers=num_workers,
                                                 collate_fn=collate_fn)
    # Fixed seed so the train/test split is reproducible
    generator = torch.Generator()
    if random_seed != 0:
        generator.manual_seed(random_seed)
    train_size = int(train_split * len(dataset))
    test_size = len(dataset) - train_size
    trainset, testset = torch.utils.data.random_split(dataset, [train_size, test_size],
                                                      generator=generator)
    # BUGFIX: honour the num_workers argument (was hard-coded to 2 here).
    return (torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=shuffle,
                                        num_workers=num_workers, collate_fn=collate_fn),
            torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=shuffle,
                                        num_workers=num_workers, collate_fn=collate_fn))
/yolo/predict.py
import torch
import torchvision.transforms as transforms
import os, sys
import numpy as np
import argparse

from .darknet import DarkNet
from .dataset import *
from .util import *


def parse_arg():
    """CLI arguments for the prediction module (parsed from sys.argv[2:])."""
    parser = argparse.ArgumentParser(description='MmWaveYoLo Prediction module', add_help=True)
    parser.add_argument('--cfg', type=str, default='yolov3micro',
                        help="Name of the network config (default: yolov3micro)")
    parser.add_argument('--pathin', type=str,
                        help="Path for the input folder (default: testset)")
    parser.add_argument('--pathout', type=str,
                        help="Path for the output folder")
    parser.add_argument('--video', type=str, default='False',
                        help="Create video after prediction (default: False)")
    parser.add_argument('--datasplit', type=float, default=0,
                        help="Dataset split percentage (default: 0 (single set))")
    parser.add_argument('--seed', type=float, default=0,
                        help="Seed for the random shuffling (default: 0, (no shuffle))")
    parser.add_argument('--bs', type=int, default=8,
                        help="Batch size (default: 8)")
    parser.add_argument('--ckpt', type=str, default='10.0',
                        help="Checkpoint name <'epoch'.'iteration'>")
    parser.add_argument('--nms', type=float, default=0.5,
                        help="NMS threshold (default: 0.5)")
    parser.add_argument('--obj', type=float, default=0.5,
                        help="Objectiveness threshold (default: 0.5)")
    parser.add_argument('--iou', type=float, default=0.5,
                        help="Intersection over Union threshold (default: 0.5)")
    parser.add_argument('--reso', type=int, default=416,
                        help="Input image resolution (default: 416)")
    parser.add_argument('--v', type=int, default=0,
                        help="Verbose (0 minimal (default), 1 normal, 2 all")
    return parser.parse_args(sys.argv[2:])


def predict():
    """Run the trained network over a test set and report precision/recall."""
    torch.cuda.empty_cache()

    # CONSTANTS
    args = parse_arg()
    pathcfg = f"cfg/{args.cfg}.cfg"
    pathin = f"dataset/{args.pathin}/final"
    pathout = f"results/{args.pathout}"
    num_workers = 2

    # NETWORK
    darknet = DarkNet(pathcfg, args.reso, args.obj, args.nms)
    pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)
    print('# of params: ', pytorch_total_params)
    if args.v > 0:
        print(darknet.module_list)

    # IMAGE PREPROCESSING
    transform = transforms.Compose([
        transforms.Resize(size=(args.reso, args.reso), interpolation=3),
        transforms.ToTensor()
    ])

    # Test data allocation
    _, testloader = getDataLoaders(pathin, transform, train_split=args.datasplit,
                                   batch_size=args.bs, num_workers=num_workers,
                                   collate_fn=collate, random_seed=args.seed)

    # LOAD A CHECKPOINT (dead pre-assignments of start_epoch/start_iteration removed)
    start_epoch, start_iteration = args.ckpt.split('.')
    start_epoch, start_iteration, state_dict, _, _, _, _ = load_checkpoint(
        f'save/checkpoints/',
        int(start_epoch),
        int(start_iteration)
    )
    darknet.load_state_dict(state_dict)

    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    darknet.to(device)
    if args.v > 0:
        print(next(darknet.parameters()).device)

    # Create the subsequent save folders
    if not os.path.isdir(pathout):
        os.makedirs(pathout)

    # PREDICT
    print(f'[LOG] PREDICT | Test set: {len(testloader.dataset)}')
    darknet.eval()  # set network to evaluation mode
    predList = []
    countLabels = 0
    with torch.no_grad():
        for bidx, (paths, inputs, targets) in enumerate(testloader):
            inputs = inputs.to(device)
            predictions = darknet(inputs)
            for idx, path in enumerate(paths):
                print(f'[LOG] PREDICT | Predicting {(bidx*args.bs)+idx+1}/{len(testloader.dataset)}', end='\r')
                savename = path.split('/')[-1].split('_')[2]
                try:
                    # rows of `predictions` whose batch index matches this image
                    prediction = predictions[predictions[:, 0] == idx]
                except Exception:
                    prediction = torch.Tensor([])
                    print(f'[ERROR] TEST | No prediction? {prediction}')
                tempL, _ = correctness(prediction, targets[idx], reso=darknet.reso,
                                       iou_thresh=args.iou)
                predList.extend(tempL)
                countLabels += targets[idx].size(0)

    # BUGFIX: args.video is a *string* defaulting to 'False', so the original
    # `if args.video:` was always truthy.  Only animate when a format was
    # actually requested.
    if args.video and args.video.lower() != 'false':
        animate_predictions(pathout, args.video)

    print(countLabels)
    predList = precision_recall(predList, countLabels)
    plot_precision_recall(predList, pathout=f'{pathout}/map', savename='')
/yolo/train.py
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import time, sys
import numpy as np
import argparse

from .darknet import DarkNet
from .dataset import *
from .util import *


def parse_arg():
    """CLI arguments for the training module (parsed from sys.argv[2:])."""
    parser = argparse.ArgumentParser(description='mmWaveYoLov3 Training module', add_help=True)
    parser.add_argument('--cfg', type=str, default='yolov3micro',
                        help="Name of the network config")
    parser.add_argument('--pathin', type=str, default='trainset',
                        help="Input dataset name")
    parser.add_argument('--datasplit', type=float, default=0.8,
                        help="Dataset split percentage (def: 0.8 (80 (train):20 (validation))")
    parser.add_argument('--seed', type=float, default=42,
                        help="Seed for the random shuffle (default: 42, 0 for no shuffling)")
    parser.add_argument('--bs', type=int, default=8,
                        help="Batch size (default: 8, 0 for single batch)")
    parser.add_argument('--ckpt', type=str, default='0.0',
                        help="Checkpoint name as <'epoch'.'iteration'>")
    parser.add_argument('--ep', type=int, default=5,
                        help="Total epoch number (default: 5)")
    parser.add_argument('--lr', type=float, default=1e-5,
                        help="Learning rate (default: 1e-5)")
    parser.add_argument('--reso', type=int, default=416,
                        help="Input image resolution (default: 416)")
    parser.add_argument('--v', type=int, default=0,
                        help="Verbose (0 minimal (default), 1 normal, 2 all")
    return parser.parse_args(sys.argv[2:])


def train():
    """Train the network, validating each epoch and checkpointing every 10 epochs."""
    torch.cuda.empty_cache()

    # CONSTANTS
    args = parse_arg()
    pathcfg = f"cfg/{args.cfg}.cfg"
    pathin = f"dataset/{args.pathin}/final"
    num_workers = 2

    # NETWORK
    darknet = DarkNet(pathcfg, args.reso)
    pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)
    print('# of params: ', pytorch_total_params)
    if args.v > 0:
        print(darknet.module_list)

    # LOAD A CHECKPOINT
    tlosses, vlosses = [], []
    optimizer, scheduler = None, None
    start_epoch, start_iteration = [int(x) for x in args.ckpt.split('.')]
    # BUGFIX: the original condition tested `start_epoch != 0 and
    # start_epoch != 0` (same operand twice), so a checkpoint such as 0.5
    # was never resumed.  Resume whenever the checkpoint is not 0.0.
    if start_epoch != 0 or start_iteration != 0:
        start_epoch, start_iteration, state_dict, \
            tlosses, vlosses, \
            optimizer, scheduler = load_checkpoint(
                f'save/checkpoints/',
                int(start_epoch),
                int(start_iteration)
            )
        darknet.load_state_dict(state_dict)

    # OPTIMIZER & HYPERPARAMETERS (recreated only when not restored above)
    if optimizer is None:
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, darknet.parameters()),
                               lr=args.lr, betas=[0.9, 0.999], eps=1e-8,
                               weight_decay=0, amsgrad=False)
    if scheduler is None:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # IMAGE PREPROCESSING / AUGMENTATION
    transform = transforms.Compose([
        transforms.Resize(size=(args.reso, args.reso), interpolation=3),
        transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor()
    ])

    # Train and Validation data allocation
    trainloader, validloader = getDataLoaders(pathin, transform,
                                              train_split=args.datasplit, batch_size=args.bs,
                                              num_workers=num_workers, collate_fn=collate,
                                              random_seed=args.seed)

    # Use GPU(s) if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        darknet = nn.DataParallel(darknet)
    darknet.to(device)
    if args.v > 0:
        print(next(darknet.parameters()).device)

    # TRAIN
    print(f'[LOG] TRAIN | Training set: {len(trainloader.dataset)}')
    print(f'[LOG] TRAIN | Validation set: {len(validloader.dataset)}')
    print(f'[LOG] TRAIN | Starting to train from epoch {start_epoch} iteration {start_iteration}')
    if start_epoch > args.ep:
        print(f'[ERR] TRAIN | Total epochs ({args.ep}) is less then current epoch ({start_epoch})')
        return

    for epoch in range(start_epoch, args.ep):
        print(f'[LOG] TRAIN | Starting Epoch #{epoch+1}')
        darknet.train()  # set network to training mode
        tloss, vloss = [], []
        start = time.time()

        for batch_idx, (_, inputs, targets) in enumerate(trainloader):
            optimizer.zero_grad()  # clear the grads from prev passes
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = darknet(inputs, targets, device)  # loss dict
            outputs['total'].backward()
            tloss.append(outputs['total'].item())
            optimizer.step()
            end = time.time()

            # Latest iteration!
            if args.v == 1:
                print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ')
            elif args.v == 2:
                print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} '
                      f'w: {outputs["w"].item():.2f} h: {outputs["h"].item():.2f} '
                      f'cls: {outputs["cls"].item():.2f} '
                      f'conf: {outputs["conf"].item()}')
            if (batch_idx % 100) == 99:
                print(f'[LOG] TRAIN | Batch #{batch_idx+1}\
 Loss: {np.mean(tloss)}\
 Time: {end - start}s')
                start = time.time()

        # Save train loss for the epoch
        tlosses.append(np.mean(tloss))
        scheduler.step()

        # VALIDATION
        with torch.no_grad():
            for batch_idx, (_, inputs, targets) in enumerate(validloader):
                inputs, targets = inputs.to(device), targets.to(device)
                voutputs = darknet(inputs, targets)
                vloss.append(voutputs['total'].item())
        print(f'[LOG] VALID | Epoch #{epoch+1}\
 Loss: {np.mean(vloss)}')
        # Save valid loss for the epoch
        vlosses.append(np.mean(vloss))

        # Periodic checkpoint every 10 epochs
        if (epoch % 10) == 9:
            save_checkpoint(f'save/checkpoints/', epoch+1, 0, {
                'epoch': epoch+1,
                'iteration': 0,
                'state_dict': darknet.state_dict(),
                'tlosses': tlosses,
                'vlosses': vlosses,
                'optimizer': optimizer,
                'scheduler': scheduler
            })
            plot_losses(tlosses, vlosses, f'save/losses')

    # Final checkpoint after the last epoch
    save_checkpoint(f'save/checkpoints/', epoch+1, 0, {
        'epoch': epoch+1,
        'iteration': 0,
        'state_dict': darknet.state_dict(),
        'tlosses': tlosses,
        'vlosses': vlosses,
        'optimizer': optimizer,
        'scheduler': scheduler
    })
    plot_losses(tlosses, vlosses, f'save/losses')
/yolo/util.py
from __future__ import division import torch import os from operator import itemgetter import numpy as np import cv2 from PIL import Image, ImageDraw import matplotlib.pyplot as plt def draw_prediction(img_path, prediction, target, reso, names, pathout, savename): """Draw prediction result Args - img_path: (str) Path to image - prediction: (np.array) Prediction result with size [#bbox, 8] 8 = [batch_idx, x1, y1, x2, y2, objectness, cls_conf, class idx] - target: (np.array) Prediction result with size [#bbox, 5] 8 = [batch_idx, x1, y1, x2, y2, class idx] - reso: (int) Image resolution - names: (list) Class names - save_path: (str) Path to save prediction result """ img = Image.open(img_path).convert('RGB') w, h = img.size h_ratio = h / reso w_ratio = w / reso draw = ImageDraw.Draw(img) # Drawing targets (labels) try: for i in range(target.shape[0]): bbox = target[i, 0:4].numpy() bbox = xywh2xyxy(bbox, target=True) caption = f'truth #{i}' color = (255, 255, 255) x1, y1, x2, y2 = bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)), outline=color, width=2) draw.rectangle((x1 * w_ratio, y2 * h_ratio + 15, x2 * w_ratio, y2 * h_ratio), fill=color) draw.text((x1 * w_ratio + 2, y2 * h_ratio), caption, fill='black') except Exception: print(f'[ERR] TEST | Could not draw target') # Drawing predictions try: for i in range(prediction.shape[0]): bbox = prediction[i, 1:5] conf = '%.2f' % prediction[i, -3] caption = f'pred {conf}' color = (0, 0, 255) x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3] draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)), outline=color, width=int(1+prediction[i, -3]*5)) draw.rectangle((x1 * w_ratio, y1 * h_ratio - 15, x2 * w_ratio, y1 * h_ratio), fill=color) draw.text((x1 * w_ratio + 2, y1 * h_ratio - 15), caption, fill='white') except Exception: print(f'[ERR] TEST | Could not draw prediction') # img.show() os.makedirs(pathout, exist_ok=True) 
img.save(f'{pathout}/{savename}') img.close() def animate_predictions(path, savetype='gif'): fps = 5 if savetype == 'gif': gif = [] images = (Image.open(f'{path}/preds/{f}').copy() for f in sorted(os.listdir(f'{path}/preds')) if f.endswith('.png')) for image in images: gif.append(image) os.makedirs(path, exist_ok=True) gif[0].save(f'{path}/sequence.gif', save_all=True, \ optimize=False, append_images=gif[1:], loop=0, \ duration=int(1000/fps)) print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.gif') elif savetype == 'avi': images = [img for img in sorted(os.listdir(f'{path}/preds')) if img.endswith(".png")] frame = cv2.imread(f'{path}/preds/{images[0]}') height, width, _ = frame.shape video = cv2.VideoWriter(f'{path}/sequence.avi', 0, fps, (width,height)) for image in images: video.write(cv2.imread(f'{path}/preds/{image}')) cv2.destroyAllWindows() video.release() print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.avi') def IoU(box1, box2): """ Compute IoU between box1 and box2 """ if box1.is_cuda == True: box1 = box1.cpu() if box2.is_cuda == True: box2 = box2.cpu() #Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1[...,0], box1[...,1], box1[...,2], box1[...,3] b2_x1, b2_y1, b2_x2, b2_y2 = box2[...,0], box2[...,1], box2[...,2], box2[...,3] #get the corrdinates of the intersection rectangle inter_rect_x1 = torch.max(b1_x1, b2_x1) inter_rect_y1 = torch.max(b1_y1, b2_y1) inter_rect_x2 = torch.min(b1_x2, b2_x2) inter_rect_y2 = torch.min(b1_y2, b2_y2) #Intersection area inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0) #Union Area b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1) b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1) iou = inter_area / (b1_area + b2_area - inter_area) return iou # TP / FP / FN / TN calculations def correctness(prediction, target, reso=416, iou_thresh=0.5): flagP = np.zeros([prediction.size(0), 2]) # Flag for 
predictions flagP[:,1] -= 1 tempCor = np.zeros(4) flagT = np.zeros(target.size(0))-1 tempList = [] if prediction.size(0) != 0: for i, p in enumerate(prediction): for j, t in enumerate(target): iou = IoU(p[1:5], xywh2xyxy(t[0:4]*reso)).numpy()[0] if iou > flagP[i, 0]: flagP[i,:] = [iou, j] for i in range(flagP.shape[0]): if flagP[i,0] >= iou_thresh and flagT[int(flagP[i,1])] == -1: # True Positive: iou >= thresh tempCor[0] += 1 flagT[int(flagP[i,1])] = 1 tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], False]) else: # False Positive: iou < thresh or duplicates tempCor[1] = 1 tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], True]) # False Negative if np.count_nonzero(flagP[:, 1] == -1) == prediction.size(0): tempCor[2] += 1 return tempList, tempCor # Precision and recall calculations def precision_recall(predList, countLabels): predList.sort(key = itemgetter(1), reverse=True) # Sort by IoU predList.sort(key = itemgetter(2)) # Sort by TP predList.sort(key = itemgetter(0), reverse=True) # Sort by objectiveness for i, l in enumerate(predList): temp = [0, 0, 0, 0] if l[2] == False: temp[0] = 1 # TP else: temp[1] = 1 # FP if i != 0: temp[0] += predList[i-1][3] # Cumulative TP temp[1] += predList[i-1][4] # Cumulative FP temp[2] = float(temp[0] / (temp[0] + temp[1])) # Precision temp[3] = float(temp[0] / countLabels) # Recall l.extend(temp) return predList # Drawing precision/recall curve def plot_precision_recall(predList, pathout, savename=''): predArr = np.array(predList, dtype=np.float) # print(np.round(predArr[:,-2:], 2)) fig, _= plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1]}) plt.subplot(2, 1, 1) plt.plot(predArr[:, -1], predArr[:, -2]) plt.plot(np.round(predArr[:,-1], 2), np.round(predArr[:,-2], 2)) plt.grid(True) plt.title(f'Precision/Recall graph ({savename})') plt.xlabel('Recall') plt.ylabel('Precision') plt.subplot(2, 1, 2) plt.plot(predArr[:,0]) ax = plt.gca() ax.axes.xaxis.set_visible(False) # ax.axes.yaxis.set_visible(False) 
plt.rcParams['axes.titley'] = 1.0 # y is in axes-relative coordinates. plt.rcParams['axes.titlepad'] = -14 # pad is in points... plt.title(f'Objectiveness score') if savename != '': os.makedirs(f'{pathout}/{savename}', exist_ok=True) plt.savefig(f'{pathout}/{savename}', dpi=100) print(f'[LOG] TRAIN | Precision/Recall graph save \"{pathout}/{savename}\"') else: plt.show() plt.close() def xywh2xyxy(bbox, target=False): if target: xc, yc = bbox[0], bbox[1] half_w, half_h = bbox[2] / 2, bbox[3] / 2 return [xc - half_w, yc - half_h, xc + half_w, yc + half_h] bbox_ = bbox.clone() if len(bbox_.size()) == 1: bbox_ = bbox_.unsqueeze(0) xc, yc = bbox_[..., 0], bbox_[..., 1] half_w, half_h = bbox_[..., 2] / 2, bbox_[..., 3] / 2 bbox_[..., 0] = xc - half_w bbox_[..., 1] = yc - half_h bbox_[..., 2] = xc + 2 * half_w bbox_[..., 3] = yc + 2 * half_h return bbox_ #Check if it is working!!! def xyxy2xywh(bbox, target=False): if target: w, h = bbox[2] - bbox[0], bbox[3] - bbox[1] xc, yc = bbox[0] + w/2, bbox[1] + h/2 return [xc, yc, w, h] bbox_ = bbox.clone() if len(bbox_.size()) == 1: bbox_ = bbox_.unsqueeze(0) w, h = bbox_[..., 2] - bbox_[..., 0], bbox_[..., 3] - bbox_[..., 1] xc, yc = bbox_[..., 0] + w/2, bbox_[..., 1] + h/2 bbox_[..., 0] = xc bbox_[..., 1] = yc bbox_[..., 2] = w bbox_[..., 3] = h return bbox_ def load_checkpoint(checkpoint_dir, epoch, iteration): """Load checkpoint from path Args - checkpoint_dir: (str) absolute path to checkpoint folder - epoch: (int) epoch of checkpoint - iteration: (int) iteration of checkpoint in one epoch Returns - start_epoch: (int) - start_iteration: (int) - state_dict: (dict) state of model """ path = os.path.join(checkpoint_dir, str(epoch) + '.' 
+ str(iteration) + '.ckpt') if not os.path.isfile(path): raise Exception("Checkpoint in epoch %d doesn't exist" % epoch) checkpoint = torch.load(path) start_epoch = checkpoint['epoch'] state_dict = checkpoint['state_dict'] start_iteration = checkpoint['iteration'] tlosses = checkpoint['tlosses'] vlosses = checkpoint['vlosses'] optimizer = checkpoint['optimizer'] scheduler = checkpoint['scheduler'] assert epoch == start_epoch, "epoch != checkpoint's start_epoch" assert iteration == start_iteration, "iteration != checkpoint's start_iteration" return start_epoch, start_iteration, state_dict, tlosses, vlosses, optimizer, scheduler def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict): """Save checkpoint to path Args - path: (str) absolute path to checkpoint folder - epoch: (int) epoch of checkpoint file - iteration: (int) iteration of checkpoint in one epoch - save_dict: (dict) saving parameters dict """ os.makedirs(checkpoint_dir, exist_ok=True) path = os.path.join(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt') assert epoch == save_dict['epoch'], "[ERROR] epoch != save_dict's start_epoch" assert iteration == save_dict['iteration'], "[ERROR] iteration != save_dict's start_iteration" if os.path.isfile(path): print("[WARNING] Overwrite checkpoint in epoch %d, iteration %d" % (epoch, iteration)) try: torch.save(save_dict, path) except Exception: raise Exception("[ERROR] Fail to save checkpoint") print("[LOG] Checkpoint %d.%d.ckpt saved" % (epoch, iteration)) def parse_cfg(cfgfile): """ Takes a configuration file Returns a list of blocks. Each blocks describes a block in the neural network to be built. 
Block is represented as a dictionary in the list """ file = open(cfgfile, 'r') lines = file.read().split('\n') # store the lines in a list lines = [x for x in lines if len(x) > 0] # get read of the empty lines lines = [x for x in lines if x[0] != '#'] # get rid of comments lines = [x.rstrip().lstrip() for x in lines] # get rid of fringe whitespaces file.close() block = {} blocks = [] for line in lines: if line[0] == "[": # This marks the start of a new block if len(block) != 0: # If block is not empty, implies it is storing values of previous block. blocks.append(block) # add it the blocks list block = {} # re-init the block block["type"] = line[1:-1].rstrip() else: key,value = line.split("=") block[key.rstrip()] = value.lstrip() blocks.append(block) return blocks def plot_losses(tlosses, vlosses=None, savepath=''): plt.plot(range(0, len(tlosses)), tlosses) if vlosses: plt.plot(range(0, len(vlosses)), vlosses) plt.legend(['Train loss', 'Valid loss'], loc='upper left') plt.title(f'Training and Validation loss ({len(tlosses)} Epochs) ') else: plt.legend(['Train loss'], loc='upper left') plt.title(f'Training loss ({len(tlosses)} Epochs) ') plt.xlabel('Epoch') plt.ylabel('Loss') if savepath != '': os.makedirs(savepath, exist_ok=True) plt.savefig(f'{savepath}/loss_{len(tlosses)}.png', dpi=100) print(f'[LOG] TRAIN | Loss graph save \"{savepath}/loss_{len(tlosses)}.png\"') else: plt.show() plt.close()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
RoboBrainCode/Backend
refs/heads/master
{"/rest_api/views.py": ["/rest_api/serializer.py", "/feed/models.py"], "/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/feed/models.py": ["/feed/queue_util.py"]}
└── ├── UpdateViewerFeeds │ └── updateViewerFeed.py ├── auth │ ├── auth.py │ └── urls.py ├── fabfile.py ├── feed │ ├── models.py │ ├── queue_util.py │ ├── urls.py │ └── views.py └── rest_api ├── serializer.py ├── urls.py └── views.py
/UpdateViewerFeeds/updateViewerFeed.py
import ConfigParser import pymongo as pm from datetime import datetime import numpy as np import importlib import sys sys.path.insert(0,'/var/www/Backend/Backend/') def readConfigFile(): """ Reading the setting file to use. Different setting files are used on Production and Test robo brain """ global setfile config = ConfigParser.ConfigParser() config.read('/tmp/backend_uwsgi_setting') env = config.get('uwsgi','env') setting_file_name = env.strip().split('.')[1] setfile = importlib.import_module(setting_file_name) def establishConnection(): """ Establishes connection to remote db """ global brain_feeds, viewer_feeds client = pm.MongoClient(host,port) db = client[dbname] brain_feeds = db['brain_feeds'] viewer_feeds = db['viewer_feeds'] def viewerFeedsUpdate(): """ Sorts Brain Feeds on Basis of score and pushes them to ViewerFeeds table """ feeds_ordered = brain_feeds.find().sort('score',pm.DESCENDING) overall_counter = 0 feeds_to_push = [] first_time = True for feeds in feeds_ordered: try: new_feed = {} new_feed['_id'] = overall_counter new_feed['feedid'] = feeds['_id'].__str__() feeds_to_push.append(new_feed) overall_counter += 1 print "{0} {1} {2}".format(overall_counter,feeds['score'],feeds['source_url']) if overall_counter % 100 == 0: if first_time: viewer_feeds.drop() first_time = False viewer_feeds.insert(feeds_to_push) feeds_to_push = [] except: print "**************skipping*************" def viewerFeedsUpdate_deprecated(): """ DEPRECATED Equally represent each project """ different_projects = brain_feeds.distinct('source_url') different_projects = sorted(different_projects,key=len) feeds_each_project = {} feeds_count = {} for url in different_projects: feeds_each_project[url] = brain_feeds.find({'source_url':url},{'created_at':1}).sort('created_at',pm.DESCENDING) feeds_count[url] = feeds_each_project[url].count() feeds_to_push = [] overall_counter = 0 level = 0 first_time = True while True: toBreak = True remaining_projects = [] for url in 
different_projects: if feeds_count[url] > level: print url new_feed = {} new_feed['_id'] = overall_counter new_feed['feedid'] = feeds_each_project[url][level]['_id'].__str__() feeds_to_push.append(new_feed) overall_counter += 1 remaining_projects.append(url) toBreak = False if overall_counter % 100 == 0: if first_time: viewer_feeds.drop() first_time = False viewer_feeds.insert(feeds_to_push) feeds_to_push = [] different_projects = remaining_projects if toBreak: break level += 1 if __name__=="__main__": global host, dbname, port, setfile, brain_feeds, viewer_feeds # Reading the setting file for db address readConfigFile() host = setfile.DATABASES['default']['HOST'] dbname = setfile.DATABASES['default']['NAME'] port = int(setfile.DATABASES['default']['PORT']) # Extablishing connection to remote db establishConnection() viewerFeedsUpdate()
/auth/auth.py
from django.http import HttpResponse
import json
from django.contrib.auth.models import User
from django.views.decorators.csrf import ensure_csrf_cookie
from django import forms
from django.contrib.auth import login, logout
from django.contrib.auth import authenticate
from base64 import b64decode


@ensure_csrf_cookie
def create_user_rb(request):
    """Create a new user.

    GET just primes the CSRF cookie; POST expects a JSON body with
    ``username``, ``email`` and ``password``.  Returns 401 when the email
    is already taken.
    """
    if request.method == 'GET':
        return HttpResponse('Ok')
    elif request.method == 'POST':
        payload = json.loads(request.body)
        username = payload['username']
        email = payload['email']
        password = payload['password']
        # Same email under a different username -> email already in use.
        if email and User.objects.filter(email=email).exclude(username=username).count():
            return HttpResponse('This email address is already in use! Try logging in.', status=401)
        # Same email and username -> this exact account already exists.
        if email and User.objects.filter(email=email, username=username).count():
            return HttpResponse('This account already exists! Try logging in.', status=401)
        user = User.objects.create_user(username, email, password)
        user.save()
        return HttpResponse('Ok')


@ensure_csrf_cookie
def login_rb(request):
    """Log a user in from a base64-encoded "email:password" POST body.

    Already-authenticated sessions short-circuit with the user's data.
    """
    if request.user.is_authenticated():
        user = request.user
        user_data = {
            'id': user.id,
            'username': user.username,
            'email': user.email,
            'loggedin': 'True'
        }
        return HttpResponse(json.dumps(user_data), content_type='application/json')
    if request.method == 'GET':
        return HttpResponse('Ok')
    elif request.method == 'POST':
        try:
            decodedCredentials = b64decode(request.body)
        except (TypeError, ValueError):
            # BUGFIX: malformed base64 used to raise and surface as a 500.
            return HttpResponse('Not logged in', status=401)
        if not ':' in decodedCredentials:
            return HttpResponse('Not logged in', status=401)
        # BUGFIX: split only on the FIRST ':' so passwords containing a
        # colon are no longer truncated (split(':') broke unpacking too).
        email, password = decodedCredentials.split(':', 1)
        user = authenticateEmail(email, password)
        if not user:
            return HttpResponse('Invalid Credentials', status=401)
        user = authenticate(username=user.username, password=password)
        if not user:
            return HttpResponse('Invalid Credentials', status=401)
        login(request, user)
        user_data = {
            'id': user.id,
            'username': user.username,
            'email': user.email
        }
        return HttpResponse(json.dumps(user_data), content_type='application/json')


def authenticateEmail(email=None, password=None):
    """Return the User matching ``email`` if ``password`` verifies, else None."""
    try:
        user = User.objects.get(email=email)
        if user.check_password(password):
            return user
        # Explicit (the original fell through and returned None implicitly).
        return None
    except User.DoesNotExist:
        return None


def logout_rb(request):
    """Log the current user out and confirm."""
    logout(request)
    return HttpResponse('Logged Out')
/auth/urls.py
from django.conf.urls import patterns, url
import auth

# URL routes for the auth app (old-style ``patterns`` syntax).
# NOTE(review): the regexes are unanchored (no '^'/'$'), so they match
# anywhere in the path -- confirm that this is intentional.
urlpatterns = patterns('',
    url(r'create_user/', auth.create_user_rb, name='create_user'),
    url(r'login/', auth.login_rb, name='login'),
    url(r'logout/', auth.logout_rb, name='logout')
)
/fabfile.py
from __future__ import with_statement
from fabric.api import cd, env, local, settings, run, sudo
from fabric.colors import green, red
from fabric.contrib.console import confirm


def _configure_host(host_string, user):
    """Point fabric at the given deploy target (shared by both deploy tasks)."""
    env.host_string = host_string
    env.key_filename = 'conf/www.pem'
    env.user = user
    env.shell = '/bin/zsh -l -c'


def prod_deploy(user='ubuntu'):
    """Fast-forward the production branch to test on robobrain.me and reload uwsgi."""
    print(red('Deploying to production at robobrain.me...'))
    if not confirm('Are you sure you want to deploy to production?'):
        print(red('Aborting deploy.'))
        # BUGFIX: the original only printed the abort message and then
        # fell through, deploying to production anyway.
        return
    _configure_host('54.149.21.165', user)
    with cd('/var/www/Backend'):
        # sudo('su - ubuntu')
        print(green('Checking out test...'))
        run('git checkout test')
        print(green('Pulling latest version of test...'))
        run('git pull origin test')
        print(green('Checking out production...'))
        run('git checkout production')
        print(green('Rebasing onto test...'))
        run('git rebase test')
        print(green('Pushing production upstream...'))
        run('git push origin production')
        print(green('Reloading server...'))
        sudo('uwsgi --reload /tmp/robobrain-master.pid')
    print(red('Done!'))


def test_deploy(user='ubuntu'):
    """Fast-forward the test branch to master on test.robobrain.me and reload uwsgi."""
    _configure_host('54.148.225.192', user)
    print(red('Deploying to test at test.robobrain.me...'))
    with cd('/var/www/Backend'):
        print(green('Checking out master...'))
        run('git checkout master')
        print(green('Pulling latest version of master...'))
        run('git pull origin master')
        print(green('Checking out test...'))
        run('git checkout test')
        print(green('Rebasing onto master...'))
        run('git rebase master')
        print(green('Pulling latest version of test...'))
        run('git pull origin test')
        print(green('Push the latest version of test...'))
        run('git push origin test')
        print(green('Reloading server...'))
        sudo('uwsgi --reload /tmp/robobrain-master.pid')
    print(red('Done!'))
/feed/models.py
from django.db import models from djangotoolbox.fields import ListField from datetime import datetime from django.db.models.signals import post_save from queue_util import add_feed_to_queue #from feed.models import BrainFeeds class GraphFeedback(models.Model): id_node = models.TextField() feedback_type = models.TextField() node_handle = models.TextField() action_type = models.TextField() def to_json(self): return {"_id":self.id, "id_node":self.id_node, "feedback_type":self.feedback_type, "node_handle":self.node_handle, "action_type":self.action_type } class Meta: db_table = "graph_feedback" class BrainFeeds(models.Model): toshow = models.BooleanField(default=True) feedtype = models.TextField() #originally feedtype -> type text = models.TextField() source_text = models.TextField() source_url = models.TextField(db_index=True) meta = {'indexes':['source_url']} media = ListField() mediatype = ListField() created_at = models.DateTimeField(default=datetime.now()) hashtags = models.TextField(db_index=True) meta = {'indexes':['hashtags']} upvotes = models.IntegerField(default=0) downvotes = models.IntegerField(default=0) jsonfeed_id = models.TextField() username = models.TextField() score = models.FloatField(default=0.0,db_index=True) meta = {'indexes':['score']} update_score = models.BooleanField(default=True,db_index=True) meta = {'indexes':['update_score']} log_normalized_feed_show = models.FloatField(default=1.0) def to_json(self): return {"_id":self.id, "toshow":self.toshow, "feedtype":self.feedtype, "text":self.text, "source_text":self.source_text, "source_url":self.source_url, "media":self.media, "mediatype":self.mediatype, "created_at":self.created_at.isoformat(), "hashtags":self.hashtags, "upvotes":self.upvotes, "downvotes":self.downvotes, "jsonfeed_id":self.jsonfeed_id, "username":self.username, "score":self.score, "log_normalized_feed_show":self.log_normalized_feed_show, "update_score":self.update_score } class Meta: db_table = 'brain_feeds' get_latest_by = 
'created_at' class JsonFeeds(models.Model): feedtype = models.TextField() #originally feedtype -> type text = models.TextField() source_text = models.TextField() source_url = models.TextField() mediashow = ListField() media = ListField() mediatype = ListField() mediamap = ListField() keywords = ListField() graphStructure = ListField() created_at = models.DateTimeField() hashtags = models.TextField(default=datetime.now, blank=True) meta = {'indexes':['hashtags']} upvotes = models.IntegerField(default=0) downvotes = models.IntegerField(default=0) username = models.TextField() def to_json(self): return {"_id":self.id, "feedtype":self.feedtype, "text":self.text, "source_text":self.source_text, "source_url":self.source_url, "mediashow":self.mediashow, "media":self.media, "mediatype":self.mediatype, "mediamap":self.mediamap, "keywords":self.keywords, "graphStructure":self.graphStructure, "created_at":self.created_at.isoformat(), "hashtags":self.hashtags, "upvotes":self.upvotes, "downvotes":self.downvotes, "username":self.username } class Meta: db_table = 'json_feeds' def postSaveJson(**kwargs): instance = kwargs.get('instance') print "Post Saving JsonFeed: ", instance.to_json() add_feed_to_queue(instance.to_json()) #Saving JsonFeed to BrainFeed brain_feed = BrainFeeds( feedtype=instance.feedtype, text=instance.text, source_text=instance.source_text, source_url=instance.source_url, hashtags=instance.hashtags, jsonfeed_id=instance.id, username=instance.username ) media = [] mediatype = [] for mediashow,_media,_mediatype in zip(instance.mediashow,instance.media,instance.mediatype): if mediashow.lower() == 'true': media.append(_media) mediatype.append(_mediatype) brain_feed.media = media brain_feed.mediatype = mediatype brain_feed.save() #Saving viewer feed """ numitem = ViewerFeed.objects.all().count() viewer_feed = ViewerFeed( id = numitem, feedid = brain_feed.id ) viewer_feed.save() """ #Saving JsonFeed to GraphDB post_save.connect(postSaveJson, JsonFeeds) class 
ViewerFeed(models.Model): feedid = models.TextField() id = models.IntegerField(db_index=True,primary_key=True) meta = {'indexes':['id']} def to_json(self): return {"_id":self.id,"id":self.id,"feedid":self.feedid} class Meta: db_table = 'viewer_feeds'
/feed/queue_util.py
#!/usr/bin/python import boto import json import traceback from boto.sqs.message import RawMessage from bson import json_util conn = boto.sqs.connect_to_region( "us-west-2", aws_access_key_id='AKIAIDKZIEN24AUR7CJA', aws_secret_access_key='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') feed_queue = conn.create_queue('feed_queue') def add_feed_to_queue(json_feed): m = RawMessage() try: m.set_body(json.dumps(json_feed, default=json_util.default)) feed_queue.write(m) except Exception, e: print traceback.format_exc() print json_feed if __name__ == '__main__': add_feed_to_queue({ "username" : "arzav", "_id": "546e6a2f5caae434656bbc36", "feedtype" : "", "mediashow" : [ ], "text" : "#Simhat_Torah is a synonym of #Rejoicing_in_the_Law", "hashtags" : " simhat_torah rejoicing_in_the_law", "mediatype" : [ ], "source_url" : "http://wordnet.princeton.edu/", "source_text" : "WordNet", "mediamap" : [ ], "media" : [ ], "keywords": ["Simhat_Torah","Rejoicing_in_the_Law","synonym","wordnet"], "upvotes" : 0, "graphStructure": ["#same_synset: #0 -> #1", "#same_synset: #1 -> #0"]})
/feed/urls.py
from django.conf.urls import patterns, url
from feed import views

# URL routes for the feed app (old-style ``patterns`` syntax).
# NOTE(review): the regexes are unanchored (no '^'/'$') -- confirm intentional.
urlpatterns = patterns('',
    url(r'most_recent/', views.return_top_k_feeds, name='most_recent'),
    url(r'infinite_scroll/', views.infinite_scrolling, name='infinite_scrolling'),
    url(r'filter/', views.filter_feeds_with_hashtags, name='filter'),
    url(r'filter_type/', views.filter_feeds_with_type, name='filter_type'),
    url(r'since/', views.return_feeds_since, name='since'),
    url(r'upvotes/', views.upvotes_recorder, name='upvotes'),
    url(r'graph_feedback/', views.save_graph_feedback, name='graph_feedback'),
)
/feed/views.py
from django.http import HttpResponse
from feed.models import BrainFeeds, ViewerFeed, GraphFeedback
import json
import numpy as np
from django.core import serializers
import dateutil.parser
from django.views.decorators.csrf import ensure_csrf_cookie
from django.db.transaction import commit_on_success


# This is a temporary function. It will be later moved to learning_plugins
def save_graph_feedback(request):
    """Persist one graph feedback event taken from the query string."""
    _id_node = (request.GET.get('id', '-1'))  # default id=-1
    _feedback_type = request.GET.get('feedback_type', '')
    _node_handle = request.GET.get('node_handle', '')
    _action_type = request.GET.get('action_type', '')
    graph_feedback = GraphFeedback(
        id_node=_id_node,
        feedback_type=_feedback_type,
        node_handle=_node_handle,
        action_type=_action_type
    )
    graph_feedback.save()
    return HttpResponse(json.dumps(graph_feedback.to_json()), content_type="application/json")


def _ordered_brainfeeds(feed_ids):
    """Fetch BrainFeeds rows for ``feed_ids``, preserving the given order.

    IDs with no matching row are silently skipped.  Runs in O(n).
    (Extracted helper: return_top_k_feeds and infinite_scrolling used to
    carry two identical copies of this reordering logic.)
    """
    brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)
    position = {fid: i for i, fid in enumerate(feed_ids)}
    ordered = [None] * len(feed_ids)
    for bf in brainfeeds_db:
        ordered[position[bf.id]] = bf
    # Drop ids that had no matching BrainFeeds row.
    return [bf for bf in ordered if bf is not None]


# Returns k most recent feeds from BrainFeed table.
def return_top_k_feeds(request):
    """Return the first k feeds in ViewerFeed order as JSON."""
    top_k = int(request.GET.get('k', '10'))  # default k=10
    max_len = ViewerFeed.objects.count()
    upper_limit = min(max_len, top_k)
    feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[:upper_limit])
    brainfeeds = _ordered_brainfeeds(feed_ids)
    update_scores_top_k(brainfeeds)
    json_feeds = [feed.to_json() for feed in brainfeeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")


# This function allows infinite scrolling.
def infinite_scrolling(request):
    """Return the next page of feeds after ``cur`` already-shown ones."""
    current_feeds = int(request.GET.get('cur', '10'))  # default cur=10
    extra_feeds = int(request.GET.get('k', '10'))  # default k=10
    max_len = ViewerFeed.objects.count()
    upper_limit = min(max_len, current_feeds + extra_feeds)
    feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[current_feeds:upper_limit])
    brainfeeds = _ordered_brainfeeds(feed_ids)
    update_scores_scroll(brainfeeds, current_feeds, extra_feeds)
    json_feeds = [feed.to_json() for feed in brainfeeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")


@commit_on_success
def update_scores_top_k(brainfeeds):
    """Mark feeds for re-scoring and bump each show counter by 1."""
    for feeds in brainfeeds:
        feeds.update_score = True
        feeds.log_normalized_feed_show += 1.0
        feeds.save()


@commit_on_success
def update_scores_scroll(brainfeeds, current_feeds, extra_feeds):
    """Mark feeds for re-scoring, weighting the bump by scroll depth."""
    page_number = current_feeds / max(1.0, extra_feeds) + 1.0
    for feeds in brainfeeds:
        feeds.update_score = True
        feeds.log_normalized_feed_show += np.log10(1.0 + page_number)
        feeds.save()


# Filters feeds using the hash word
def filter_feeds_with_hashtags(request):
    """Return up to k visible feeds whose hashtags contain ``hashword``."""
    hashword = request.GET.get('hashword')
    k = int(request.GET.get('k', '10'))  # default k=10
    if not hashword:
        error_response = {
            'Error': 'hashword not provided.'
        }
        return HttpResponse(json.dumps(error_response), content_type='application/json')
    brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(hashtags__contains=hashword).order_by('-created_at')[:k]
    json_feeds = [feed.to_json() for feed in brain_feeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")


# Filters feeds with types
def filter_feeds_with_type(request):
    """Return up to k visible feeds whose source_text equals ``type``."""
    feedtype = request.GET.get('type')
    print(feedtype)
    k = int(request.GET.get('k', '10'))  # default k=10
    if not feedtype:
        error_response = {
            'Error': 'type not provided.'
        }
        return HttpResponse(json.dumps(error_response), content_type='application/json')
    brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(source_text=feedtype).order_by('-created_at')[:k]
    json_feeds = [feed.to_json() for feed in brain_feeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")


# Return feeds created after datetime. Input time should be in ISO string
# format. It is then parsed to UTC format.
def return_feeds_since(request):
    """Return up to k visible feeds created at/after the given ISO datetime."""
    # BUGFIX: validate the parameter BEFORE parsing -- the original called
    # dateutil.parser.parse(None) first, which raised and made the
    # "not provided" error response below unreachable.
    raw_datetime = request.GET.get('datetime')
    k = int(request.GET.get('k', '10'))  # default k=10
    if not raw_datetime:
        error_response = {
            'Error': 'time_since not provided.'
        }
        return HttpResponse(json.dumps(error_response), content_type='application/json')
    time_since = dateutil.parser.parse(raw_datetime)
    brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(created_at__gte=time_since).order_by('-created_at')[:k]
    json_feeds = [feed.to_json() for feed in brain_feeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")


# Records upvotes for a feed
@ensure_csrf_cookie
def upvotes_recorder(request):
    """Record an up/down vote on a feed (POST JSON: feedid, vote, change)."""
    if request.method == 'GET':
        return HttpResponse('Ok')
    elif request.method == 'POST':
        payload = json.loads(request.body)
        feedid = payload['feedid']
        vote_dir = payload['vote']
        change = payload['change']  # True when the user flips an earlier vote
        if not feedid:
            error_response = {
                'Error': 'No feedid provided'
            }
            return HttpResponse(json.dumps(error_response), content_type='application/json')
        if not vote_dir == -1 and not vote_dir == 1:
            error_response = {
                'Error': 'voteid {0} not provided. Can only be 1 or -1'.format(vote_dir)
            }
            return HttpResponse(json.dumps(error_response), content_type='application/json')
        brain_feed = BrainFeeds.objects.get(id=feedid)
        votes = {}
        if vote_dir == 1:
            brain_feed.upvotes += 1
            if change:
                brain_feed.downvotes -= 1
        if vote_dir == -1:
            brain_feed.downvotes += 1
            if change:
                brain_feed.upvotes -= 1
        votes = {
            'upvotes': max(brain_feed.upvotes, 0),
            'downvotes': max(brain_feed.downvotes, 0)
        }
        brain_feed.save()
        return HttpResponse(json.dumps(votes), content_type='application/json')
/rest_api/serializer.py
from django.forms import widgets
from rest_framework import serializers
from feed.models import JsonFeeds
from djangotoolbox.fields import ListField
import drf_compound_fields.fields as drf
from datetime import datetime


class TagFieldS(serializers.Serializer):
    # Single optional media string.
    media = serializers.CharField(required=False)


class FeedSerializer(serializers.Serializer):
    """(De)serializes JsonFeeds documents for the REST feed API."""
    pk = serializers.Field()  # Note: `Field` is an untyped read-only field.
    feedtype = serializers.CharField(required=False)
    text = serializers.CharField(required=False)
    source_text = serializers.CharField(required=False)
    source_url = serializers.CharField(required=False)
    hashtags = serializers.CharField(required=False)
    created_at = serializers.DateTimeField(required=False)
    upvotes = serializers.IntegerField(required=False)
    media = drf.ListField(serializers.CharField(), required=False)  # serializers.CharField(required=False,many=True)
    mediamap = drf.ListField(serializers.CharField(), required=False)
    mediatype = drf.ListField(serializers.CharField(), required=False)
    keywords = drf.ListField(serializers.CharField(), required=False)
    graphStructure = drf.ListField(serializers.CharField(), required=False)
    mediashow = drf.ListField(serializers.CharField(), required=False)
    username = serializers.CharField(required=False)

    def restore_object(self, attrs, instance=None):
        """
        Create or update a new snippet instance,
        given a dictionary of deserialized field values.

        Note that if we don't define this method,
        then deserializing data will simply return a dictionary of items.
        """
        if instance:
            # Update existing instance (field copying currently disabled).
            #instance.feedtype = attrs.get('feedtype', instance.feedtype)
            #instance.code = attrs.get('code', instance.code)
            #instance.linenos = attrs.get('linenos', instance.linenos)
            #instance.language = attrs.get('language', instance.language)
            #instance.style = attrs.get('style', instance.style)
            return instance
        # Create new instance; creation time is stamped server-side.
        attrs['created_at'] = datetime.now()
        return JsonFeeds(**attrs)
/rest_api/urls.py
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns

# REST API routes; format_suffix_patterns adds optional .json/.api suffixes.
urlpatterns = patterns('rest_api.views',
    url(r'^feeds/$', 'feed_list'),
    #url(r'^snippets/(?P<pk>[0-9]+)$', 'snippet_detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
/rest_api/views.py
# Create your views here. from rest_framework import status from rest_framework.decorators import api_view from rest_framework.response import Response from feed.models import JsonFeeds from rest_api.serializer import FeedSerializer from datetime import datetime from rest_framework import permissions @api_view(['GET', 'POST']) def feed_list(request): #List all snippets, or create a new snippet. if request.method == 'GET': feeds = JsonFeeds.objects.all()[:25] serializer = FeedSerializer(feeds, many=True) return Response(serializer.data) elif request.method == 'POST': serializer = FeedSerializer(data=request.DATA) if serializer.is_valid(): serializer.save() return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
KAcee77/django_sputnik_map
refs/heads/main
{"/django_sputnik_maps/__init__.py": ["/django_sputnik_maps/widgets.py"], "/sample/admin.py": ["/django_sputnik_maps/widgets.py", "/django_sputnik_maps/fields.py", "/sample/models.py"], "/sample/models.py": ["/django_sputnik_maps/fields.py"]}
└── ├── django_sputnik_maps │ ├── __init__.py │ ├── apps.py │ ├── fields.py │ └── widgets.py └── sample ├── admin.py └── models.py
/django_sputnik_maps/__init__.py
from .widgets import AddressWidget
/django_sputnik_maps/apps.py
from django.apps import AppConfig


class DjangoSputnikMapsConfig(AppConfig):
    """App configuration for the django_sputnik_maps package."""
    name = 'django_sputnik_maps'
/django_sputnik_maps/fields.py
from django.db import models


class AddressField(models.CharField):
    """Marker subclass of CharField.

    Adds no behavior of its own; its distinct type lets the admin target
    address fields (e.g. via ``formfield_overrides``) with the map widget.
    """
    pass
/django_sputnik_maps/widgets.py
from django.conf import settings
from django.forms import widgets


class AddressWidget(widgets.TextInput):
    '''Text input for an address; a map will be drawn after the address field.'''
    # Template renders the plain text input plus the Leaflet map container.
    template_name = 'django_sputnik_maps/widgets/mapwidget.html'

    class Media:
        css = {
            'all': ('https://unpkg.com/leaflet@1.0.1/dist/leaflet.css',
                    settings.STATIC_URL + 'django_sputnik_maps/css/jquery-ui.min.css',
                    settings.STATIC_URL + 'django_sputnik_maps/css/base.css',)
        }
        # NOTE(review): base.js is loaded BEFORE jquery-3.5.1.js; if base.js
        # uses jQuery this order is wrong -- confirm and swap if needed.
        js = (
            "https://unpkg.com/leaflet@1.0.1/dist/leaflet.js",
            settings.STATIC_URL + 'django_sputnik_maps/js/base.js',
            settings.STATIC_URL + 'django_sputnik_maps/js/jquery-3.5.1.js',
            settings.STATIC_URL + 'django_sputnik_maps/js/jquery-ui.min.js',
        )
/sample/admin.py
# from django.db import models from django.contrib import admin from django_sputnik_maps.fields import AddressField from django_sputnik_maps.widgets import AddressWidget from .models import SampleModel @admin.register(SampleModel) class SampleModelAdmin(admin.ModelAdmin): formfield_overrides = { AddressField: { 'widget': AddressWidget } }
/sample/models.py
from django.db import models
from django_sputnik_maps.fields import AddressField


# all fields must be present in the model
class SampleModel(models.Model):
    """Example model demonstrating AddressField with the map widget."""
    region = models.CharField(max_length=100)
    place = models.CharField(max_length=100)
    street = models.CharField(max_length=100)
    house = models.IntegerField()
    lat = models.FloatField()  # latitude of the chosen point
    lon = models.FloatField()  # longitude of the chosen point
    address = AddressField(max_length=200)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
sudheermouni/NeckTie
refs/heads/main
{"/Necktie/necktieapp/admin.py": ["/Necktie/necktieapp/models/patients.py", "/Necktie/necktieapp/models/patent_doctorTb.py", "/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/models/__init__.py": ["/Necktie/necktieapp/models/patients.py", "/Necktie/necktieapp/models/patent_doctorTb.py", "/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/models/patent_doctorTb.py": ["/Necktie/necktieapp/models/patients.py", "/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/views/patient_view.py": ["/Necktie/necktieapp/models/patients.py"], "/Necktie/necktieapp/views/__init__.py": ["/Necktie/necktieapp/views/doctor_view.py", "/Necktie/necktieapp/views/patient_view.py"], "/Necktie/necktieapp/serializers/__init__.py": ["/Necktie/necktieapp/serializers/doctor_serializer.py"], "/Necktie/necktieapp/views/doctor_view.py": ["/Necktie/necktieapp/serializers/doctor_serializer.py", "/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/management/commands/bulk_create.py": ["/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/models/patients.py": ["/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/serializers/doctor_serializer.py": ["/Necktie/necktieapp/models/doctors.py"], "/Necktie/necktieapp/tests/test_doctors.py": ["/Necktie/necktieapp/models/doctors.py"]}
└── └── Necktie └── necktieapp ├── admin.py ├── apps.py ├── management │ └── commands │ └── bulk_create.py ├── migrations │ ├── 0001_initial.py │ ├── 0002_alter_doctors_d_phone.py │ ├── 0003_patient.py │ ├── 0004_auto_20211027_2226.py │ ├── 0005_auto_20211028_1129.py │ └── 0006_alter_patient_doctor.py ├── models │ ├── __init__.py │ ├── doctors.py │ ├── patent_doctorTb.py │ └── patients.py ├── serializers │ ├── __init__.py │ └── doctor_serializer.py ├── tests │ └── test_doctors.py ├── urls.py └── views ├── __init__.py ├── doctor_view.py └── patient_view.py
/Necktie/necktieapp/admin.py
from django.contrib import admin
from .models import Doctors, Patient, PatentDoctorTb

# Expose all three models on the default admin site.
admin.site.register(Doctors)
admin.site.register(Patient)
admin.site.register(PatentDoctorTb)
/Necktie/necktieapp/apps.py
from django.apps import AppConfig


class NecktieappConfig(AppConfig):
    """App configuration for necktieapp."""
    # 64-bit auto primary keys for all models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'necktieapp'
/Necktie/necktieapp/management/commands/bulk_create.py
import random
import string

from django.core.management.base import BaseCommand
from django.utils.crypto import get_random_string

from necktieapp.models import Doctors

# Kept for backward compatibility with any external importers; the command
# itself no longer uses this shared dict (see BUGFIX note in handle()).
sample_data = {
    'd_surname': get_random_string(),
    'd_firstname': get_random_string(),
    'd_username': "",
    'd_phone': get_random_string(),
    'd_address': get_random_string(),
    'd_country': get_random_string(),
    'd_specialization': "CD",
    'd_pincode': 524101,
}


def _random_doctor_data():
    """Build one fresh set of random field values for a Doctors row."""
    return {
        'd_surname': get_random_string(),
        'd_firstname': get_random_string(),
        'd_username': ''.join(random.choices(string.ascii_uppercase + string.digits, k=8)),
        'd_phone': get_random_string(),
        'd_address': get_random_string(),
        'd_country': get_random_string(),
        'd_specialization': "CD",
        'd_pincode': 524101,
    }


class Command(BaseCommand):
    help = 'Create random doctors'

    def add_arguments(self, parser):
        parser.add_argument('total', type=int, help='Indicates the number of users to be created')

    def handle(self, *args, **kwargs):
        """Wipe the Doctors table and repopulate it with ``total`` random rows."""
        total = kwargs['total']
        # WARNING: deletes every existing doctor before repopulating.
        Doctors.objects.all().delete()
        # BUGFIX: the original filled a module-level dict once at import
        # time, so every "random" doctor shared the same surname, phone,
        # address, etc. (only d_username varied).  Each instance now gets
        # freshly generated values.
        instances = [Doctors(**_random_doctor_data()) for _ in range(total)]
        Doctors.objects.bulk_create(instances)
/Necktie/necktieapp/migrations/0001_initial.py
# Generated by Django 3.2.8 on 2021-10-27 15:55 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Doctors', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('d_surnam', models.CharField(blank=True, max_length=20, null=True)), ('d_firstname', models.CharField(blank=True, max_length=20, null=True)), ('d_username', models.CharField(max_length=40, unique=True)), ('d_phone', models.IntegerField(blank=True, null=True)), ('d_address', models.TextField(blank=True, null=True)), ('d_country', models.CharField(max_length=30)), ('d_state', models.CharField(choices=[('CD', 'Cardiology'), ('GS', 'General Surgery'), ('EC', 'Endocrinology'), ('NT', 'Neonatology')], max_length=4)), ('d_pincode', models.IntegerField()), ], ), ]
/Necktie/necktieapp/migrations/0002_alter_doctors_d_phone.py
# Generated by Django 3.2.8 on 2021-10-27 16:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('necktieapp', '0001_initial'), ] operations = [ migrations.AlterField( model_name='doctors', name='d_phone', field=models.CharField(blank=True, max_length=10, null=True), ), ]
/Necktie/necktieapp/migrations/0003_patient.py
# Generated by Django 3.2.8 on 2021-10-27 16:40 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('necktieapp', '0002_alter_doctors_d_phone'), ] operations = [ migrations.CreateModel( name='Patient', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('p_surname', models.CharField(blank=True, max_length=20, null=True)), ('p_fullname', models.CharField(blank=True, max_length=20, null=True)), ('p_username', models.CharField(max_length=40)), ('p_phone', models.CharField(blank=True, max_length=10, null=True)), ('p_country', models.CharField(blank=True, max_length=50, null=True)), ('p_state', models.CharField(blank=True, max_length=50, null=True)), ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.doctors')), ], ), ]
/Necktie/necktieapp/migrations/0004_auto_20211027_2226.py
# Generated by Django 3.2.8 on 2021-10-27 16:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('necktieapp', '0003_patient'), ] operations = [ migrations.RemoveField( model_name='patient', name='doctor', ), migrations.CreateModel( name='PatentDoctorTb', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.doctors')), ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='necktieapp.patient')), ], ), migrations.AddField( model_name='patient', name='doctor', field=models.ManyToManyField(through='necktieapp.PatentDoctorTb', to='necktieapp.Doctors'), ), ]
/Necktie/necktieapp/migrations/0005_auto_20211028_1129.py
# Generated by Django 3.2.8 on 2021-10-28 05:59 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('necktieapp', '0004_auto_20211027_2226'), ] operations = [ migrations.RenameField( model_name='doctors', old_name='d_state', new_name='d_specialization', ), migrations.RenameField( model_name='doctors', old_name='d_surnam', new_name='d_surname', ), ]
/Necktie/necktieapp/migrations/0006_alter_patient_doctor.py
# Generated by Django 3.2.8 on 2021-10-28 06:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('necktieapp', '0005_auto_20211028_1129'), ] operations = [ migrations.AlterField( model_name='patient', name='doctor', field=models.ManyToManyField(blank=True, null=True, through='necktieapp.PatentDoctorTb', to='necktieapp.Doctors'), ), ]
/Necktie/necktieapp/models/__init__.py
from .doctors import Doctors from .patients import Patient from .patent_doctorTb import PatentDoctorTb
/Necktie/necktieapp/models/doctors.py
from django.db import models
from model_utils import Choices

# Closed set of medical specializations a doctor may register under.
SPECIALIZATIONS = Choices(
    ("CD", "Cardiology"),
    ("GS", "General Surgery"),
    ("EC", "Endocrinology"),
    ("NT", "Neonatology"),
)


class Doctors(models.Model):
    """A doctor registered on the platform; identified by unique d_username."""
    d_surname = models.CharField(max_length=20, blank=True, null=True)
    d_firstname = models.CharField(max_length=20, blank=True, null=True)
    # Unique handle; the only mandatory identity field.
    d_username = models.CharField(max_length=40, blank=False, null=False, unique=True)
    d_phone = models.CharField(max_length=10, blank=True, null=True)
    d_address = models.TextField(blank=True, null=True)
    d_country = models.CharField(max_length=30)
    # Two-letter code from SPECIALIZATIONS above.
    d_specialization = models.CharField(
        choices=SPECIALIZATIONS,
        max_length=4,
        blank=False,
        null=False,
    )
    d_pincode = models.IntegerField()

    def __str__(self):
        return self.d_username
/Necktie/necktieapp/models/patent_doctorTb.py
from django.db import models from .doctors import Doctors from .patients import Patient class PatentDoctorTb(models.Model): ''' we can add extra fields here ''' doctor = models.ForeignKey(Doctors, blank=False, null=False, on_delete=models.CASCADE) patient = models.ForeignKey(Patient, blank=False, null=False, on_delete=models.CASCADE)
/Necktie/necktieapp/models/patients.py
from django.db import models from .doctors import Doctors class Patient(models.Model): p_surname = models.CharField(max_length=20, blank=True, null=True) doctor = models.ManyToManyField(Doctors, through="PatentDoctorTb", null=True, blank=True) p_fullname = models.CharField(max_length=20, blank=True, null=True) p_username = models.CharField(max_length=40, blank=False, null=False) p_phone = models.CharField(max_length=10, blank=True, null=True) p_country = models.CharField(max_length=50, blank=True, null=True) p_state = models.CharField(max_length=50, blank=True, null=True) def __str__(self): return self.p_username
/Necktie/necktieapp/serializers/__init__.py
from .doctor_serializer import DoctorSerializer from .patient_serializer import PatientSerializer
/Necktie/necktieapp/serializers/doctor_serializer.py
from rest_framework import serializers from necktieapp.models import Doctors class DoctorSerializer(serializers.ModelSerializer): class Meta: model = Doctors fields = "__all__"
/Necktie/necktieapp/tests/test_doctors.py
from django.test import TestCase, TransactionTestCase from necktieapp.models import Doctors sample_data = { 'd_surname': "sudheer", 'd_firstname': "mandi", 'd_username': "smre", 'd_phone': "7702231789", 'd_address': "Ramalingapuram", 'd_country': "India", 'd_specialization': "CD", 'd_pincode': 524101, } class TestDoctor(TransactionTestCase): fixtures = ["doctors.json"] def test_create_new_record(self): model_instance = Doctors.objects.create(**sample_data) self.assertIsInstance(model_instance, Doctors) self.assertEqual(model_instance.d_username, "smre") def test_update_record(self): instance = Doctors.objects.get(id=1) instance.d_phone = "9177935906" instance.save() self.assertEqual(instance.d_phone, "9177935906") def test_should_not_save_duplicate_username(self): before_count = Doctors.objects.count() sample_data["d_username"] = "smreddy" try: Doctors.objects.create(**sample_data) except Exception as e: after_count = Doctors.objects.count() self.assertEqual(before_count, after_count)
/Necktie/necktieapp/urls.py
from rest_framework.routers import DefaultRouter from django.conf.urls import url, include from necktieapp import views router = DefaultRouter(trailing_slash=False) router.register(r'doctors', views.DoctorViewset) router.register(r'patients', views.PatientViewset) urlpatterns = [ url(r'^v1/', include(router.urls)), ]
/Necktie/necktieapp/views/__init__.py
from .doctor_view import DoctorViewset # noqa: F401 from .patient_view import PatientViewset # noqa: F401
/Necktie/necktieapp/views/doctor_view.py
from django_filters.rest_framework import DjangoFilterBackend from rest_framework import viewsets, filters from rest_framework.permissions import IsAuthenticated from necktieapp.models import Doctors from necktieapp.serializers import DoctorSerializer class DoctorViewset(viewsets.ModelViewSet): permission_classes = (IsAuthenticated,) queryset = Doctors.objects.all() serializer_class = DoctorSerializer filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter] filterset_fields = ['id', 'd_specialization', 'd_username'] search_fields = ['id', 'd_specialization', 'd_username'] ordering_fields = ['id', 'd_specialization', 'd_username']
/Necktie/necktieapp/views/patient_view.py
from django_filters.rest_framework import DjangoFilterBackend from rest_framework import viewsets, filters from rest_framework.permissions import IsAuthenticated from necktieapp.models import Patient from necktieapp.serializers import PatientSerializer class PatientViewset(viewsets.ModelViewSet): permission_classes = (IsAuthenticated,) queryset = Patient.objects.all() serializer_class = PatientSerializer filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter] filterset_fields = ['id', 'p_surname', 'p_username'] search_fields = ['id', 'p_surname', 'p_username'] ordering_fields = ['id', 'p_surname', 'p_username']
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
briis/unifiprotect
refs/heads/master
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
└── └── custom_components └── unifiprotect ├── __init__.py ├── binary_sensor.py ├── button.py ├── models.py ├── services.py └── utils.py
/custom_components/unifiprotect/__init__.py
"""UniFi Protect Platform.""" from __future__ import annotations import asyncio from datetime import timedelta import logging from aiohttp import CookieJar from aiohttp.client_exceptions import ServerDisconnectedError from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME, CONF_VERIFY_SSL, EVENT_HOMEASSISTANT_STOP, Platform, ) from homeassistant.core import HomeAssistant, callback from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady from homeassistant.helpers import entity_registry as er from homeassistant.helpers.aiohttp_client import async_create_clientsession from pyunifiprotect import NotAuthorized, NvrError, ProtectApiClient from pyunifiprotect.data import ModelType from .const import ( CONF_ALL_UPDATES, CONF_DOORBELL_TEXT, CONF_OVERRIDE_CHOST, CONFIG_OPTIONS, DEFAULT_SCAN_INTERVAL, DEVICES_FOR_SUBSCRIBE, DEVICES_THAT_ADOPT, DOMAIN, MIN_REQUIRED_PROTECT_V, OUTDATED_LOG_MESSAGE, PLATFORMS, ) from .data import ProtectData from .services import async_cleanup_services, async_setup_services _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=DEFAULT_SCAN_INTERVAL) @callback async def _async_migrate_data( hass: HomeAssistant, entry: ConfigEntry, protect: ProtectApiClient ) -> None: # already up to date, skip if CONF_ALL_UPDATES in entry.options: return _LOGGER.info("Starting entity migration...") # migrate entry options = dict(entry.options) data = dict(entry.data) options[CONF_ALL_UPDATES] = False if CONF_DOORBELL_TEXT in options: del options[CONF_DOORBELL_TEXT] hass.config_entries.async_update_entry(entry, data=data, options=options) # migrate entities registry = er.async_get(hass) mac_to_id: dict[str, str] = {} mac_to_channel_id: dict[str, str] = {} bootstrap = await protect.get_bootstrap() for model in DEVICES_THAT_ADOPT: attr = model.value + "s" for device in getattr(bootstrap, attr).values(): mac_to_id[device.mac] = device.id if 
model != ModelType.CAMERA: continue for channel in device.channels: channel_id = str(channel.id) if channel.is_rtsp_enabled: break mac_to_channel_id[device.mac] = channel_id count = 0 entities = er.async_entries_for_config_entry(registry, entry.entry_id) for entity in entities: new_unique_id: str | None = None if entity.domain != Platform.CAMERA.value: parts = entity.unique_id.split("_") if len(parts) >= 2: device_or_key = "_".join(parts[:-1]) mac = parts[-1] device_id = mac_to_id[mac] if device_or_key == device_id: new_unique_id = device_id else: new_unique_id = f"{device_id}_{device_or_key}" else: parts = entity.unique_id.split("_") if len(parts) == 2: mac = parts[1] device_id = mac_to_id[mac] channel_id = mac_to_channel_id[mac] new_unique_id = f"{device_id}_{channel_id}" else: device_id = parts[0] channel_id = parts[2] extra = "" if len(parts) == 3 else "_insecure" new_unique_id = f"{device_id}_{channel_id}{extra}" if new_unique_id is None: continue _LOGGER.debug( "Migrating entity %s (old unique_id: %s, new unique_id: %s)", entity.entity_id, entity.unique_id, new_unique_id, ) try: registry.async_update_entity(entity.entity_id, new_unique_id=new_unique_id) except ValueError: _LOGGER.warning( "Could not migrate entity %s (old unique_id: %s, new unique_id: %s)", entity.entity_id, entity.unique_id, new_unique_id, ) else: count += 1 _LOGGER.info("Migrated %s entities", count) if count != len(entities): _LOGGER.warning("%s entities not migrated", len(entities) - count) @callback def _async_import_options_from_data_if_missing( hass: HomeAssistant, entry: ConfigEntry ) -> None: options = dict(entry.options) data = dict(entry.data) modified = False for importable_option in CONFIG_OPTIONS: if importable_option not in entry.options and importable_option in entry.data: options[importable_option] = entry.data[importable_option] del data[importable_option] modified = True if modified: hass.config_entries.async_update_entry(entry, data=data, options=options) async def 
async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up the UniFi Protect config entries.""" _async_import_options_from_data_if_missing(hass, entry) session = async_create_clientsession(hass, cookie_jar=CookieJar(unsafe=True)) protect = ProtectApiClient( host=entry.data[CONF_HOST], port=entry.data[CONF_PORT], username=entry.data[CONF_USERNAME], password=entry.data[CONF_PASSWORD], verify_ssl=entry.data[CONF_VERIFY_SSL], session=session, subscribed_models=DEVICES_FOR_SUBSCRIBE, override_connection_host=entry.options.get(CONF_OVERRIDE_CHOST, False), ignore_stats=not entry.options.get(CONF_ALL_UPDATES, False), ) _LOGGER.debug("Connect to UniFi Protect") data_service = ProtectData(hass, protect, SCAN_INTERVAL, entry) try: nvr_info = await protect.get_nvr() except NotAuthorized as err: raise ConfigEntryAuthFailed(err) from err except (asyncio.TimeoutError, NvrError, ServerDisconnectedError) as err: raise ConfigEntryNotReady from err if nvr_info.version < MIN_REQUIRED_PROTECT_V: _LOGGER.error( OUTDATED_LOG_MESSAGE, nvr_info.version, MIN_REQUIRED_PROTECT_V, ) return False await _async_migrate_data(hass, entry, protect) if entry.unique_id is None: hass.config_entries.async_update_entry(entry, unique_id=nvr_info.mac) await data_service.async_setup() if not data_service.last_update_success: raise ConfigEntryNotReady hass.data.setdefault(DOMAIN, {})[entry.entry_id] = data_service hass.config_entries.async_setup_platforms(entry, PLATFORMS) async_setup_services(hass) entry.async_on_unload(entry.add_update_listener(_async_options_updated)) entry.async_on_unload( hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, data_service.async_stop) ) return True async def _async_options_updated(hass: HomeAssistant, entry: ConfigEntry) -> None: """Update options.""" await hass.config_entries.async_reload(entry.entry_id) async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Unload UniFi Protect config entry.""" if unload_ok := await 
hass.config_entries.async_unload_platforms(entry, PLATFORMS): data: ProtectData = hass.data[DOMAIN][entry.entry_id] await data.async_stop() hass.data[DOMAIN].pop(entry.entry_id) async_cleanup_services(hass) return bool(unload_ok) async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Migrate old entry.""" _LOGGER.debug("Migrating from version %s", config_entry.version) if config_entry.version == 1: new = {**config_entry.data} # keep verify SSL false for anyone migrating to maintain backwards compatibility new[CONF_VERIFY_SSL] = False if CONF_DOORBELL_TEXT in new: del new[CONF_DOORBELL_TEXT] config_entry.version = 2 hass.config_entries.async_update_entry(config_entry, data=new) _LOGGER.info("Migration to version %s successful", config_entry.version) return True
/custom_components/unifiprotect/binary_sensor.py
"""This component provides binary sensors for UniFi Protect.""" from __future__ import annotations from copy import copy from dataclasses import dataclass import logging from homeassistant.components.binary_sensor import ( BinarySensorDeviceClass, BinarySensorEntity, BinarySensorEntityDescription, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import ATTR_LAST_TRIP_TIME, ATTR_MODEL from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity import EntityCategory from homeassistant.helpers.entity_platform import AddEntitiesCallback from pyunifiprotect.data import NVR, Camera, Event, Light, MountType, Sensor from .const import DOMAIN from .data import ProtectData from .entity import ( EventThumbnailMixin, ProtectDeviceEntity, ProtectNVREntity, async_all_device_entities, ) from .models import ProtectRequiredKeysMixin from .utils import get_nested_attr _LOGGER = logging.getLogger(__name__) _KEY_DOOR = "door" @dataclass class ProtectBinaryEntityDescription( ProtectRequiredKeysMixin, BinarySensorEntityDescription ): """Describes UniFi Protect Binary Sensor entity.""" ufp_last_trip_value: str | None = None MOUNT_DEVICE_CLASS_MAP = { MountType.GARAGE: BinarySensorDeviceClass.GARAGE_DOOR, MountType.WINDOW: BinarySensorDeviceClass.WINDOW, MountType.DOOR: BinarySensorDeviceClass.DOOR, } CAMERA_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = ( ProtectBinaryEntityDescription( key="doorbell", name="Doorbell", device_class=BinarySensorDeviceClass.OCCUPANCY, icon="mdi:doorbell-video", ufp_required_field="feature_flags.has_chime", ufp_value="is_ringing", ufp_last_trip_value="last_ring", ), ProtectBinaryEntityDescription( key="dark", name="Is Dark", icon="mdi:brightness-6", ufp_value="is_dark", ), ) LIGHT_SENSORS: tuple[ProtectBinaryEntityDescription, ...] 
= ( ProtectBinaryEntityDescription( key="dark", name="Is Dark", icon="mdi:brightness-6", ufp_value="is_dark", ), ProtectBinaryEntityDescription( key="motion", name="Motion Detected", device_class=BinarySensorDeviceClass.MOTION, ufp_value="is_pir_motion_detected", ufp_last_trip_value="last_motion", ), ) SENSE_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = ( ProtectBinaryEntityDescription( key=_KEY_DOOR, name="Contact", device_class=BinarySensorDeviceClass.DOOR, ufp_value="is_opened", ufp_last_trip_value="open_status_changed_at", ufp_enabled="is_contact_sensor_enabled", ), ProtectBinaryEntityDescription( key="battery_low", name="Battery low", device_class=BinarySensorDeviceClass.BATTERY, entity_category=EntityCategory.DIAGNOSTIC, ufp_value="battery_status.is_low", ), ProtectBinaryEntityDescription( key="motion", name="Motion Detected", device_class=BinarySensorDeviceClass.MOTION, ufp_value="is_motion_detected", ufp_last_trip_value="motion_detected_at", ufp_enabled="is_motion_sensor_enabled", ), ProtectBinaryEntityDescription( key="tampering", name="Tampering Detected", device_class=BinarySensorDeviceClass.TAMPER, ufp_value="is_tampering_detected", ufp_last_trip_value="tampering_detected_at", ), ) MOTION_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = ( ProtectBinaryEntityDescription( key="motion", name="Motion", device_class=BinarySensorDeviceClass.MOTION, ufp_value="is_motion_detected", ufp_last_trip_value="last_motion", ), ) DISK_SENSORS: tuple[ProtectBinaryEntityDescription, ...] 
= ( ProtectBinaryEntityDescription( key="disk_health", name="Disk {index} Health", device_class=BinarySensorDeviceClass.PROBLEM, entity_category=EntityCategory.DIAGNOSTIC, ), ) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up binary sensors for UniFi Protect integration.""" data: ProtectData = hass.data[DOMAIN][entry.entry_id] entities: list[ProtectDeviceEntity] = async_all_device_entities( data, ProtectDeviceBinarySensor, camera_descs=CAMERA_SENSORS, light_descs=LIGHT_SENSORS, sense_descs=SENSE_SENSORS, ) entities += _async_motion_entities(data) entities += _async_nvr_entities(data) async_add_entities(entities) @callback def _async_motion_entities( data: ProtectData, ) -> list[ProtectDeviceEntity]: entities: list[ProtectDeviceEntity] = [] for device in data.api.bootstrap.cameras.values(): for description in MOTION_SENSORS: entities.append(ProtectEventBinarySensor(data, device, description)) _LOGGER.debug( "Adding binary sensor entity %s for %s", description.name, device.name, ) return entities @callback def _async_nvr_entities( data: ProtectData, ) -> list[ProtectDeviceEntity]: entities: list[ProtectDeviceEntity] = [] device = data.api.bootstrap.nvr for index, _ in enumerate(device.system_info.storage.devices): for description in DISK_SENSORS: entities.append( ProtectDiskBinarySensor(data, device, description, index=index) ) _LOGGER.debug( "Adding binary sensor entity %s", (description.name or "{index}").format(index=index), ) return entities class ProtectDeviceBinarySensor(ProtectDeviceEntity, BinarySensorEntity): """A UniFi Protect Device Binary Sensor.""" device: Camera | Light | Sensor entity_description: ProtectBinaryEntityDescription @callback def _async_update_device_from_protect(self) -> None: super()._async_update_device_from_protect() if self.entity_description.key == "doorbell": new_value = self.entity_description.get_ufp_value(self.device) if new_value != self.is_on: 
_LOGGER.debug( "Changing doorbell sensor from %s to %s", self.is_on, new_value ) self._attr_is_on = self.entity_description.get_ufp_value(self.device) if self.entity_description.ufp_last_trip_value is not None: last_trip = get_nested_attr( self.device, self.entity_description.ufp_last_trip_value ) attrs = self.extra_state_attributes or {} self._attr_extra_state_attributes = { **attrs, ATTR_LAST_TRIP_TIME: last_trip, } # UP Sense can be any of the 3 contact sensor device classes if self.entity_description.key == _KEY_DOOR and isinstance(self.device, Sensor): self.entity_description.device_class = MOUNT_DEVICE_CLASS_MAP.get( self.device.mount_type, BinarySensorDeviceClass.DOOR ) class ProtectDiskBinarySensor(ProtectNVREntity, BinarySensorEntity): """A UniFi Protect NVR Disk Binary Sensor.""" entity_description: ProtectBinaryEntityDescription def __init__( self, data: ProtectData, device: NVR, description: ProtectBinaryEntityDescription, index: int, ) -> None: """Initialize the Binary Sensor.""" description = copy(description) description.key = f"{description.key}_{index}" description.name = (description.name or "{index}").format(index=index) self._index = index super().__init__(data, device, description) @callback def _async_update_device_from_protect(self) -> None: super()._async_update_device_from_protect() disks = self.device.system_info.storage.devices disk_available = len(disks) > self._index self._attr_available = self._attr_available and disk_available if disk_available: disk = disks[self._index] self._attr_is_on = not disk.healthy self._attr_extra_state_attributes = {ATTR_MODEL: disk.model} class ProtectEventBinarySensor(EventThumbnailMixin, ProtectDeviceBinarySensor): """A UniFi Protect Device Binary Sensor with access tokens.""" device: Camera @callback def _async_get_event(self) -> Event | None: """Get event from Protect device.""" event: Event | None = None if self.device.is_motion_detected and self.device.last_motion_event is not None: event = 
self.device.last_motion_event return event
/custom_components/unifiprotect/button.py
"""Support for Ubiquiti's UniFi Protect NVR.""" from __future__ import annotations import logging from homeassistant.components.button import ButtonDeviceClass, ButtonEntity from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from pyunifiprotect.data.base import ProtectAdoptableDeviceModel from .const import DEVICES_THAT_ADOPT, DOMAIN from .data import ProtectData from .entity import ProtectDeviceEntity _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Discover devices on a UniFi Protect NVR.""" data: ProtectData = hass.data[DOMAIN][entry.entry_id] async_add_entities( [ ProtectButton( data, device, ) for device in data.get_by_types(DEVICES_THAT_ADOPT) ] ) class ProtectButton(ProtectDeviceEntity, ButtonEntity): """A Ubiquiti UniFi Protect Reboot button.""" _attr_entity_registry_enabled_default = False _attr_device_class = ButtonDeviceClass.RESTART def __init__( self, data: ProtectData, device: ProtectAdoptableDeviceModel, ) -> None: """Initialize an UniFi camera.""" super().__init__(data, device) self._attr_name = f"{self.device.name} Reboot Device" async def async_press(self) -> None: """Press the button.""" _LOGGER.debug("Rebooting %s with id %s", self.device.model, self.device.id) await self.device.reboot()
/custom_components/unifiprotect/models.py
"""The unifiprotect integration models.""" from __future__ import annotations from collections.abc import Callable, Coroutine from dataclasses import dataclass import logging from typing import Any from homeassistant.helpers.entity import EntityDescription from pyunifiprotect.data import NVR, ProtectAdoptableDeviceModel from .utils import get_nested_attr _LOGGER = logging.getLogger(__name__) @dataclass class ProtectRequiredKeysMixin: """Mixin for required keys.""" ufp_required_field: str | None = None ufp_value: str | None = None ufp_value_fn: Callable[[ProtectAdoptableDeviceModel | NVR], Any] | None = None ufp_enabled: str | None = None def get_ufp_value(self, obj: ProtectAdoptableDeviceModel | NVR) -> Any: """Return value from UniFi Protect device.""" if self.ufp_value is not None: return get_nested_attr(obj, self.ufp_value) if self.ufp_value_fn is not None: return self.ufp_value_fn(obj) # reminder for future that one is required raise RuntimeError( # pragma: no cover "`ufp_value` or `ufp_value_fn` is required" ) def get_ufp_enabled(self, obj: ProtectAdoptableDeviceModel | NVR) -> bool: """Return value from UniFi Protect device.""" if self.ufp_enabled is not None: return bool(get_nested_attr(obj, self.ufp_enabled)) return True @dataclass class ProtectSetableKeysMixin(ProtectRequiredKeysMixin): """Mixin to for settable values.""" ufp_set_method: str | None = None ufp_set_method_fn: Callable[ [ProtectAdoptableDeviceModel, Any], Coroutine[Any, Any, None] ] | None = None async def ufp_set(self, obj: ProtectAdoptableDeviceModel, value: Any) -> None: """Set value for UniFi Protect device.""" assert isinstance(self, EntityDescription) _LOGGER.debug("Setting %s to %s for %s", self.name, value, obj.name) if self.ufp_set_method is not None: await getattr(obj, self.ufp_set_method)(value) elif self.ufp_set_method_fn is not None: await self.ufp_set_method_fn(obj, value)
/custom_components/unifiprotect/services.py
"""UniFi Protect Integration services.""" from __future__ import annotations import asyncio import functools from typing import Any from homeassistant.config_entries import ConfigEntryState from homeassistant.const import ATTR_DEVICE_ID from homeassistant.core import HomeAssistant, ServiceCall, callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import config_validation as cv, device_registry as dr from homeassistant.helpers.service import async_extract_referenced_entity_ids from pydantic import ValidationError from pyunifiprotect.api import ProtectApiClient from pyunifiprotect.exceptions import BadRequest import voluptuous as vol from .const import ATTR_MESSAGE, DOMAIN from .data import ProtectData SERVICE_ADD_DOORBELL_TEXT = "add_doorbell_text" SERVICE_REMOVE_DOORBELL_TEXT = "remove_doorbell_text" SERVICE_SET_DEFAULT_DOORBELL_TEXT = "set_default_doorbell_text" ALL_GLOBAL_SERIVCES = [ SERVICE_ADD_DOORBELL_TEXT, SERVICE_REMOVE_DOORBELL_TEXT, SERVICE_SET_DEFAULT_DOORBELL_TEXT, ] DOORBELL_TEXT_SCHEMA = vol.All( vol.Schema( { **cv.ENTITY_SERVICE_FIELDS, vol.Required(ATTR_MESSAGE): cv.string, }, ), cv.has_at_least_one_key(ATTR_DEVICE_ID), ) def _async_all_ufp_instances(hass: HomeAssistant) -> list[ProtectApiClient]: """All active UFP instances.""" return [ data.api for data in hass.data[DOMAIN].values() if isinstance(data, ProtectData) ] @callback def _async_unifi_mac_from_hass(mac: str) -> str: # MAC addresses in UFP are always caps return mac.replace(":", "").upper() @callback def _async_get_macs_for_device(device_entry: dr.DeviceEntry) -> list[str]: return [ _async_unifi_mac_from_hass(cval) for ctype, cval in device_entry.connections if ctype == dr.CONNECTION_NETWORK_MAC ] @callback def _async_get_ufp_instances( hass: HomeAssistant, device_id: str ) -> tuple[dr.DeviceEntry, ProtectApiClient]: device_registry = dr.async_get(hass) if not (device_entry := device_registry.async_get(device_id)): raise HomeAssistantError(f"No device 
found for device id: {device_id}") if device_entry.via_device_id is not None: return _async_get_ufp_instances(hass, device_entry.via_device_id) macs = _async_get_macs_for_device(device_entry) ufp_instances = [ i for i in _async_all_ufp_instances(hass) if i.bootstrap.nvr.mac in macs ] if not ufp_instances: # should not be possible unless user manually enters a bad device ID raise HomeAssistantError( # pragma: no cover f"No UniFi Protect NVR found for device ID: {device_id}" ) return device_entry, ufp_instances[0] @callback def _async_get_protect_from_call( hass: HomeAssistant, call: ServiceCall ) -> list[tuple[dr.DeviceEntry, ProtectApiClient]]: referenced = async_extract_referenced_entity_ids(hass, call) instances: list[tuple[dr.DeviceEntry, ProtectApiClient]] = [] for device_id in referenced.referenced_devices: instances.append(_async_get_ufp_instances(hass, device_id)) return instances async def _async_call_nvr( instances: list[tuple[dr.DeviceEntry, ProtectApiClient]], method: str, *args: Any, **kwargs: Any, ) -> None: try: await asyncio.gather( *(getattr(i.bootstrap.nvr, method)(*args, **kwargs) for _, i in instances) ) except (BadRequest, ValidationError) as err: raise HomeAssistantError(str(err)) from err async def add_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None: """Add a custom doorbell text message.""" message: str = call.data[ATTR_MESSAGE] instances = _async_get_protect_from_call(hass, call) await _async_call_nvr(instances, "add_custom_doorbell_message", message) async def remove_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None: """Remove a custom doorbell text message.""" message: str = call.data[ATTR_MESSAGE] instances = _async_get_protect_from_call(hass, call) await _async_call_nvr(instances, "remove_custom_doorbell_message", message) async def set_default_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None: """Set the default doorbell text message.""" message: str = call.data[ATTR_MESSAGE] instances = 
_async_get_protect_from_call(hass, call) await _async_call_nvr(instances, "set_default_doorbell_message", message) def async_setup_services(hass: HomeAssistant) -> None: """Set up the global UniFi Protect services.""" services = [ ( SERVICE_ADD_DOORBELL_TEXT, functools.partial(add_doorbell_text, hass), DOORBELL_TEXT_SCHEMA, ), ( SERVICE_REMOVE_DOORBELL_TEXT, functools.partial(remove_doorbell_text, hass), DOORBELL_TEXT_SCHEMA, ), ( SERVICE_SET_DEFAULT_DOORBELL_TEXT, functools.partial(set_default_doorbell_text, hass), DOORBELL_TEXT_SCHEMA, ), ] for name, method, schema in services: if hass.services.has_service(DOMAIN, name): continue hass.services.async_register(DOMAIN, name, method, schema=schema) def async_cleanup_services(hass: HomeAssistant) -> None: """Cleanup global UniFi Protect services (if all config entries unloaded).""" loaded_entries = [ entry for entry in hass.config_entries.async_entries(DOMAIN) if entry.state == ConfigEntryState.LOADED ] if len(loaded_entries) == 1: for name in ALL_GLOBAL_SERIVCES: hass.services.async_remove(DOMAIN, name)
/custom_components/unifiprotect/utils.py
"""UniFi Protect Integration utils.""" from __future__ import annotations from enum import Enum from typing import Any def get_nested_attr(obj: Any, attr: str) -> Any: """Fetch a nested attribute.""" attrs = attr.split(".") value = obj for key in attrs: if not hasattr(value, key): return None value = getattr(value, key) if isinstance(value, Enum): value = value.value return value
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
beichao1314/TREC2016
refs/heads/master
{"/run2_crawlerA/crawler.py": ["/run1_crawlerA/process_profile.py", "/run2_crawlerA/estimate_time.py", "/run2_crawlerA/Rest.py", "/run2_crawlerA/rewritesummary.py", "/run2_crawlerA/extension.py"]}
└── ├── run1_crawlerA │ └── process_profile.py └── run2_crawlerA ├── Rest.py ├── crawler.py ├── estimate_time.py ├── extension.py └── rewritesummary.py
/run1_crawlerA/process_profile.py
# -*- coding: utf-8 -*- """ Created on Thu Jul 7 16:17:49 2016 @author: xiaobei """ import nltk from nltk.corpus import stopwords import re def removeStopWords_1(originSegs): stops = set(stopwords.words('english')) resultStr = [seg.lower() for seg in originSegs if seg.lower() not in stops and seg.isalpha()] return resultStr def filters(content): results = re.compile(r'http://[a-zA-Z0-9.?/&=:]*', re.S) filter = results.sub("", content) return filter def preprocess_profile(sentence): # filterwords = filters(sentence) words = nltk.word_tokenize(sentence) removestopwords = removeStopWords_1(words) result = stemword(removestopwords) return result def stemword(word): porter = nltk.PorterStemmer().stem result = list(map(porter, word)) return result
/run2_crawlerA/Rest.py
# -*- coding: utf-8 -*- """ Created on Fri Jul 8 19:37:33 2016 @author: xiaobei """ import pycurl import requests import json import logging logging.basicConfig(level=logging.INFO) class REST(object): def __init__(self,clientid): self.clientid=clientid self.c = pycurl.Curl() self.c.setopt(pycurl.CUSTOMREQUEST, 'POST') self.c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json']) def GetTopic(self): # curl -H 'Content-Type: application/json' hostname.com/topics/abcdefghijk url = "http://54.164.151.19:80/topics/" + self.clientid header = {'content-type': 'application/json'} r = requests.get(url, headers=header) return json.loads(r.text) def Post(self, topicid, tweetid): url = "http://54.164.151.19:80/tweet/" + topicid + "/" + tweetid + "/" + self.clientid self.c.setopt(pycurl.URL, url) self.c.perform() # r = self.c.getinfo(pycurl.HTTP_CODE) return True
/run2_crawlerA/crawler.py
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 7 17:10:58 2016

Twitter streaming crawler (run 2, system A): samples the public stream and
feeds every decoded status into PushSummary for real-time summarization.

@author: xiaobei
"""
import ast
import json
import logging
import logging.handlers
import time

from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream

from rewritesummary import PushSummary
from Rest import REST
from estimate_time import Time
from extension import search as sch
from process_profile import preprocess_profile

# NOTE(review): hard-coded OAuth credentials belong in environment variables
# or an untracked config file, and should be rotated -- they are compromised
# once this file is shared.
consumer_key = "bEyQ2mZRAABCIdZajeaYhpnUe"
consumer_secret = "kJUa3IHjUFm1znHCoAnaDQY7RUPGzcMqveFcgvsh3i7v4Jta3b"
access_token = "2910563640-Z77URQhoPhDsg393yazywkd0WHjjqWrn1tlV8aH"
access_token_secret = "gPRcz33gphQL2VTDEQ40Uu8yTqVNoOwXZ1TAMQYSV4MHm"

logging.basicConfig(level=logging.INFO)


class TweetListener(StreamListener):
    """tweepy listener that forwards each decoded status to PushSummary."""

    def __init__(self, api=None):
        super(TweetListener, self).__init__(api)
        self.logger = logging.getLogger('tweetlogger')
        # Hourly-rotated log files: everything at INFO, warnings separately.
        statusHandler = logging.handlers.TimedRotatingFileHandler(
            'status.log', when='H', encoding='utf-8', utc=True)
        statusHandler.setLevel(logging.INFO)
        self.logger.addHandler(statusHandler)
        warningHandler = logging.handlers.TimedRotatingFileHandler(
            'warning.log', when='H', encoding='utf-8', utc=True)
        warningHandler.setLevel(logging.WARN)
        self.logger.addHandler(warningHandler)
        logging.captureWarnings(True)
        consoleHandler = logging.StreamHandler()
        consoleHandler.setLevel(logging.WARN)
        self.logger.addHandler(consoleHandler)
        self.count = 0  # number of raw stream messages processed so far

    def on_data(self, data):
        """Decode one raw stream message and hand it to the summarizer.

        Bug fix: the original called ``json.loads(data, encoding='utf-8')``;
        the ``encoding`` keyword was removed in Python 3.9 and raises
        TypeError there.  The payload is already text, so plain json.loads
        is correct.
        """
        status = json.loads(data)
        pushSummary.pushSummarys(status)
        self.count += 1
        # Lightweight progress heartbeat every 1000 messages.
        if self.count % 1000 == 0:
            print("%d statuses processed %s" % (
                self.count, time.strftime('%X', time.localtime(time.time()))))
        return True

    def on_error(self, exception):
        """Log stream errors; returning None lets tweepy keep the stream up."""
        self.logger.warning(str(exception))


if __name__ == '__main__':
    listener = TweetListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, listener)
    with open('clientidrun2.txt', 'r') as f:
        clientid = f.read()
    rest = REST(clientid)
    # Interest profiles (top query terms per topic, built offline from
    # title/description/narrative term counts plus Bing expansion) and the
    # topic-id map are cached in q_e.txt / q_x.txt.
    # Bug fix: the caches were loaded with eval(), which executes arbitrary
    # code from the file; ast.literal_eval parses the dict literals safely.
    with open('q_e.txt', 'r') as f:
        interest_files = ast.literal_eval(f.read())
    with open('q_x.txt', 'r') as ff:
        topicid = ast.literal_eval(ff.read())
    times = Time('Tue Aug 02 00:00:00 +0000 2016')
    fa = open('A.txt', 'a', encoding='utf-8')
    pushSummary = PushSummary(0.9, interest_files, times, rest, fa, topicid)
    while True:
        try:
            stream.sample()
        except Exception as ex:
            # Best-effort loop: report the failure and reconnect immediately.
            print(str(ex))
/run2_crawlerA/estimate_time.py
# from datetime import datetime import datetime import time as T from email.utils import parsedate class Time(object): def __init__(self, firsttime): self.firsttime = parsedate(firsttime) self.firsttime = datetime.datetime.fromtimestamp(T.mktime(self.firsttime)) def calculatetime(self, time): time = parsedate(time) time = datetime.datetime.fromtimestamp(T.mktime(time)) t = (time - self.firsttime).days return t def settime(self): self.firsttime = self.firsttime + datetime.timedelta(hours=24)
/run2_crawlerA/extension.py
from py_bing_search import PyBingWebSearch # s1= 9uCkTYlAG9x4iPdxAeDuQipYvc2vEn6oUbPKZJnFlVY # s2=3L8LwEROeBFVSA1FwUVKLfIO+Ue979rarr+Y4mBZwaE s3 = 'E+ok1GP7qpi6xgtE0yfsbrQFZSElgMBK2ZD1kwf/WXA' s4 = 'AKvk0/D9XzJuCQA9n/a+TFbqwOFder9xd9Yj/22ivA8' s5='r8OUqrE+DW/W4qs8ShfN2ljAU8214AkuksvYy7iMPGk' def search(search_term): bing_web = PyBingWebSearch(s5, search_term,web_only=False) first_ten_result = bing_web.search(limit=10, format='json') return first_ten_result
/run2_crawlerA/rewritesummary.py
import pymysql
from process import preprocess
import time as T
import nltk
import math
import operator


class PushSummary():
    """Online tweet summarizer over per-topic interest profiles.

    Maintains, for every topic index x in [0, L):
      * summary A -- tweets pushed immediately (scenario A, at most 10/day);
      * summary B -- candidates accumulated during the day, re-ranked and
        written to B.txt when the day rolls over (scenario B, up to 100/day).
    A tweet is accepted when its language-model divergence (lms) against the
    profile is low enough, its stream tf-idf sum is high enough, and its
    Jensen-Shannon divergence (JSD) against already-selected tweets is high
    enough (novelty).  All three thresholds adapt per topic over time.
    """

    def __init__(self, lemda, interest_files, time, rest, fa, topicid):
        # lemda: smoothing weight for the query/stream language models.
        # interest_files: {index: [stemmed profile terms]} per topic.
        # time: estimate_time.Time instance marking the current day window.
        # rest: REST client (submission endpoint; the inline Post call is
        #   currently commented out below).
        # fa: open file handle receiving scenario-A output lines.
        # topicid: {index: official topic id}, aligned with interest_files.
        self.topicid = topicid
        self.L = len(self.topicid)
        self.SumOfLenthOfStream = 0   # total word count seen in the stream
        self.wordInStream = {}        # stream-wide word frequencies
        self.lemda = lemda
        self.interest_files = interest_files
        self.time = time
        self.day = 1                  # 1-based day counter (August 2016)
        self.rest = rest
        self.fa = fa
        # Per-topic adaptive thresholds (A = push scenario, B = digest).
        self.tfidfthresholdA = []
        self.jsdthresholdA = []
        self.lmsthresholdA = []
        self.tfidfthresholdB = []
        self.jsdthresholdB = []
        self.lmsthresholdB = []
        # Per-topic per-day counters and accumulators.
        self.numofdayA = []
        self.numofdayB = []
        self.queries_numOfTweet = []
        self.queries_numOfWord = []
        self.queries_word = []        # word tf within accepted tweets
        self.queries_occur = []       # word document frequency
        self.summaryA = []
        self.summaryB = []
        self.qoccur = []              # per profile term: #selected tweets containing it
        self.numofq = []              # per profile term: total occurrences
        self.numofqinstream = {}      # profile-term counts over the whole stream
        for i in range(self.L):
            self.numofdayA.append(0)
            self.numofdayB.append(0)
            self.queries_word.append({})
            self.queries_occur.append({})
            self.summaryA.append([])
            self.summaryB.append([])
            self.qoccur.append({})
            self.numofq.append({})
            self.tfidfthresholdA.append(0.7)
            self.jsdthresholdA.append(0.04)
            self.lmsthresholdA.append(0.02)
            self.tfidfthresholdB.append(0.5)
            self.jsdthresholdB.append(0.04)
            self.lmsthresholdB.append(0.02)
            self.queries_numOfTweet.append(0)
            self.queries_numOfWord.append(0)

    def pushSummarys(self, tweet):
        """Process a single decoded tweet dict from the stream.

        Skips deletions and non-English tweets; unwraps retweets so the
        original text is scored.  On day rollover (delta >= 1) the B
        candidates of the finished day are re-ranked with a TF/IDF-style
        scoring and appended to B.txt before the new tweet is handled.
        """
        if ('delete' not in tweet) and (tweet['lang'] == 'en'):
            if 'retweeted_status' in tweet:
                # Score the original tweet but keep the retweet's timestamps.
                tem = tweet['retweeted_status']
                tem['timestamp_ms'] = tweet['timestamp_ms']
                tem['created_at'] = tweet['created_at']
                tweet = tem
            delta = self.time.calculatetime(tweet['created_at'])
            if delta >= 1:
                # --- Day rollover: finalize yesterday's summaries. ---
                for x in range(self.L):
                    stemwords_interest_profile = self.interest_files[x]
                    self.numofdayA[x] = 0
                    self.numofdayB[x] = 0
                    # Lower tomorrow's A threshold to the weakest tweet
                    # accepted today (entries are [payload, day]).
                    listofsummaryA = [summary[0] for summary in self.summaryA[x] if summary[1] == self.day]
                    if len(listofsummaryA) > 0:
                        self.tfidfthresholdA[x] = min(summaryA[2] for summaryA in listofsummaryA)
                    listofsummaryB = [summary[0] for summary in self.summaryB[x] if summary[1] == self.day]
                    if len(listofsummaryB) > 0:
                        self.tfidfthresholdB[x] = min(summaryB[2] for summaryB in listofsummaryB)
                        # Re-rank today's B candidates with a TFF*TDF score
                        # (RITF/LRTF term weighting, IDF*AEF term salience).
                        sumoflen = sum(summaryBBBB[5] for summaryBBBB in listofsummaryB)
                        ADL = sumoflen / len(listofsummaryB)  # avg doc length
                        lenofq = len(stemwords_interest_profile)
                        result = []
                        for summaryBBB in listofsummaryB:
                            score = 0
                            TF = summaryBBB[4]  # profile-term tf in this tweet
                            for q in stemwords_interest_profile:
                                tf = TF[q]
                                avgtf = sum(TF[qq] for qq in stemwords_interest_profile) / len(TF)
                                RITF = math.log2(1 + tf) / math.log2(1 + avgtf)
                                LRTF = tf * math.log2(1 + ADL / summaryBBB[5]) + 0.0001
                                w = 2 / (1 + math.log2(1 + lenofq))
                                TFF = w * RITF / (1 + RITF) + (1 - w) * LRTF / (1 + LRTF)
                                IDF = math.log((len(listofsummaryB) + 1) / (self.qoccur[x][q] + 1)) + 0.0001
                                AEF = self.numofq[x][q] / (self.qoccur[x][q] + 1)
                                TDF = IDF * AEF / (1 + AEF)
                                sim = TFF * TDF
                                score += sim
                                del tf, avgtf, RITF, LRTF, w, TFF, IDF, AEF, TDF, sim
                            result.append([score, summaryBBB[1]])
                        del listofsummaryB
                        result.sort(key=operator.itemgetter(0), reverse=True)
                        # Emit TREC-format lines: date topic Q0 tweetid rank score run.
                        j = 1
                        for i in result:
                            if (self.day) > 9:
                                d = '201608' + str(self.day)
                            else:
                                d = '2016080' + str(self.day)
                            with open('B.txt', 'a') as ff:
                                ff.write('%s %s Q0 %s %s %s CCNUNLPrun2\n' % (d, self.topicid[x], i[1], str(j), i[0]))
                            j = j + 1
                self.time.settime()
                self.day = self.day + 1
            content = tweet['text']
            stemwords_tweet = preprocess(content)
            del content
            wordInTweet = {}
            # preprocess presumably returns False for unusable tweets
            # (e.g. too short / all stopwords) -- TODO confirm in process.py.
            if stemwords_tweet == False:
                pass
            else:
                numOfWordAtweet = len(stemwords_tweet)
                self.SumOfLenthOfStream = numOfWordAtweet + self.SumOfLenthOfStream
                id_str = tweet['id_str']
                # Update stream-wide and per-tweet word frequencies.
                for word in stemwords_tweet:
                    if word in self.wordInStream:
                        self.wordInStream[word] += 1
                    else:
                        self.wordInStream[word] = 1
                    if word in wordInTweet:
                        wordInTweet[word] += 1
                    else:
                        wordInTweet[word] = 1
                # Update stream counts of every profile term.
                for x in range(self.L):
                    stemwords_interest_profile = self.interest_files[x]
                    for q in stemwords_interest_profile:
                        if q in self.numofqinstream:
                            self.numofqinstream[q] += stemwords_tweet.count(q)
                        else:
                            self.numofqinstream[q] = stemwords_tweet.count(q)
                # Score the tweet against every topic.
                for x in range(self.L):
                    stemwords_interest_profile = self.interest_files[x]
                    count = sum(stemwords_tweet.count(wordsss) for wordsss in stemwords_interest_profile)
                    lenofq = len(stemwords_interest_profile)
                    if count >= 1:
                        # Profile-term tf within this tweet.
                        qt = {}
                        for qqq in stemwords_interest_profile:
                            if qqq in qt:
                                qt[qqq] += stemwords_tweet.count(qqq)
                            else:
                                qt[qqq] = stemwords_tweet.count(qqq)
                        # Smoothed JS-style divergence between the query
                        # language model and the tweet language model.
                        lms = 0
                        samewords = [q for q in stemwords_interest_profile if q in stemwords_tweet]
                        for qq in samewords:
                            Pq = self.lemda * 1.0 / float(lenofq) + (1 - self.lemda) * float(self.numofqinstream[qq]) / float(self.SumOfLenthOfStream)
                            Pt = self.lemda * qt[qq] / float(numOfWordAtweet) + (1 - self.lemda) * float(self.numofqinstream[qq]) / float(self.SumOfLenthOfStream)
                            M = 0.5 * (Pq + Pt)
                            lms += 0.5 * Pq * math.log(Pq / M) + 0.5 * Pt * math.log(Pt / M)
                        # ---------- Scenario A (immediate push). ----------
                        if lms <= self.lmsthresholdA[x]:
                            sumoftfidf = 0.0
                            for word in stemwords_tweet:
                                if word in self.queries_word[x]:
                                    self.queries_word[x][word] += 1
                                else:
                                    self.queries_word[x][word] = 1
                            for word in set(stemwords_tweet):
                                if word not in self.queries_occur[x]:
                                    self.queries_occur[x][word] = 1
                                else:
                                    self.queries_occur[x][word] += 1
                            self.queries_numOfWord[x] += numOfWordAtweet
                            self.queries_numOfTweet[x] += 1
                            for word in stemwords_tweet:
                                tf = self.queries_word[x][word] / self.queries_numOfWord[x]
                                idf = math.log2((self.queries_numOfTweet[x] + 1) / self.queries_occur[x][word])
                                sumoftfidf = sumoftfidf + tf * idf
                            if sumoftfidf >= self.tfidfthresholdA[x] and self.numofdayA[x] < 10:
                                # Novelty check: min JSD against every tweet
                                # already in summary A for this topic.
                                listofsummaryA = [summary[0] for summary in self.summaryA[x]]
                                if len(listofsummaryA) > 0:
                                    jsd = []
                                    for summary in listofsummaryA:
                                        sumofjsd = 0
                                        tf = {}
                                        for wordss in summary[0]:
                                            if wordss in tf:
                                                tf[wordss] += 1
                                            else:
                                                tf[wordss] = 1
                                        sameword = [word for word in stemwords_tweet if word in summary[0]]
                                        if len(sameword) > 0:
                                            for word in sameword:
                                                Pti = float(wordInTweet[word]) / float(numOfWordAtweet)
                                                Psi = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTi = self.lemda * Pti + (1 - self.lemda) * Psi
                                                Ptj = float(tf[word]) / float(len(summary[0]))
                                                Psj = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTj = self.lemda * Ptj + (1 - self.lemda) * Psj
                                                M = float((thetaTi + thetaTj) / 2)
                                                sumofjsd += 0.5 * (thetaTi * math.log(thetaTi / M)) + 0.5 * (thetaTj * math.log(thetaTj / M))
                                            jsd.append(sumofjsd)
                                        else:
                                            # No shared words: treat as novel.
                                            jsd.append(0.06)
                                    JSD = min(jsd)
                                else:
                                    JSD = 0.04
                                if JSD >= self.jsdthresholdA[x]:
                                    # self.rest.Post(self.topicid[x], id_str)
                                    # Tighten thresholds to this accepted tweet.
                                    self.lmsthresholdA[x] = lms
                                    self.jsdthresholdA[x] = JSD
                                    self.numofdayA[x] += 1
                                    a = [stemwords_tweet, id_str, sumoftfidf, JSD]
                                    self.summaryA[x].append([a, self.day])
                                    self.fa.write('%s %s tfidf:%s jsd:%s lms:%s\n' % (self.day, self.topicid[x], sumoftfidf, JSD, lms))
                        # ---------- Scenario B (end-of-day digest). ----------
                        if lms <= self.lmsthresholdB[x]:
                            sumoftfidf = 0.0
                            for word in stemwords_tweet:
                                if word in self.queries_word[x]:
                                    self.queries_word[x][word] += 1
                                else:
                                    self.queries_word[x][word] = 1
                            for word in set(stemwords_tweet):
                                if word not in self.queries_occur[x]:
                                    self.queries_occur[x][word] = 1
                                else:
                                    self.queries_occur[x][word] += 1
                            self.queries_numOfWord[x] += numOfWordAtweet
                            self.queries_numOfTweet[x] += 1
                            for word in stemwords_tweet:
                                tf = self.queries_word[x][word] / self.queries_numOfWord[x]
                                idf = math.log2((self.queries_numOfTweet[x] + 1) / self.queries_occur[x][word])
                                sumoftfidf = sumoftfidf + tf * idf
                            if sumoftfidf >= self.tfidfthresholdB[x] and self.numofdayB[x] < 100:
                                listofsummaryB = [summary[0] for summary in self.summaryB[x]]
                                if len(listofsummaryB) > 0:
                                    jsd = []
                                    for summary in listofsummaryB:
                                        sumofjsd = 0
                                        sameword = [word for word in stemwords_tweet if word in summary[0]]
                                        tf = {}
                                        for wordss in summary[0]:
                                            if wordss in tf:
                                                tf[wordss] += 1
                                            else:
                                                tf[wordss] = 1
                                        if len(sameword) > 0:
                                            for word in sameword:
                                                Pti = float(wordInTweet[word]) / float(numOfWordAtweet)
                                                Psi = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTi = self.lemda * Pti + (1 - self.lemda) * Psi
                                                Ptj = float(tf[word]) / float(len(summary[0]))
                                                Psj = float(self.wordInStream[word]) / float(self.SumOfLenthOfStream)
                                                thetaTj = self.lemda * Ptj + (1 - self.lemda) * Psj
                                                M = float((thetaTi + thetaTj) / 2)
                                                sumofjsd += 0.5 * (thetaTi * math.log(thetaTi / M)) + 0.5 * (thetaTj * math.log(thetaTj / M))
                                            jsd.append(sumofjsd)
                                        else:
                                            jsd.append(0.06)
                                    JSD = min(jsd)
                                else:
                                    JSD = 0.04
                                if JSD >= self.jsdthresholdB[x]:
                                    self.numofdayB[x] += 1
                                    # Running-average threshold updates.
                                    # NOTE(review): the lms threshold is
                                    # averaged with JSD, not with lms --
                                    # looks like a copy/paste slip; confirm
                                    # intended behavior before changing.
                                    lenoflistB = len(listofsummaryB)
                                    self.jsdthresholdB[x] = (self.jsdthresholdB[x] * lenoflistB + JSD) / (lenoflistB + 1)
                                    self.lmsthresholdB[x] = (self.lmsthresholdB[x] * lenoflistB + JSD) / (lenoflistB + 1)
                                    # Record profile-term stats for the
                                    # end-of-day re-ranking.
                                    TF = {}
                                    for q in stemwords_interest_profile:
                                        TF[q] = stemwords_tweet.count(q)
                                        if q in stemwords_tweet:
                                            if q in self.qoccur[x]:
                                                self.qoccur[x][q] += 1
                                            else:
                                                self.qoccur[x][q] = 1
                                        else:
                                            # NOTE(review): this resets any
                                            # previous count to 0 for terms
                                            # absent from this tweet -- verify.
                                            self.qoccur[x][q] = 0
                                        if q in self.numofq[x]:
                                            self.numofq[x][q] += stemwords_tweet.count(q)
                                        else:
                                            self.numofq[x][q] = stemwords_tweet.count(q)
                                    b = [stemwords_tweet, id_str, sumoftfidf, JSD, TF, numOfWordAtweet]
                                    self.summaryB[x].append([b, self.day])
        pass
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
nurtai00/WebDevProjectBack
refs/heads/main
{"/projectback/api/admin.py": ["/projectback/api/models.py"], "/projectback/api/serializers.py": ["/projectback/api/models.py"], "/projectback/api/views.py": ["/projectback/api/models.py", "/projectback/api/serializers.py"], "/projectback/api/urls.py": ["/projectback/api/views.py"]}
└── └── projectback └── api ├── admin.py ├── migrations │ └── 0002_auto_20210508_0140.py ├── models.py ├── serializers.py ├── urls.py └── views.py
/projectback/api/admin.py
from django.contrib import admin from api.models import Product, Category, Cart, User # Register your models here. admin.site.register(Product), admin.site.register(Category), admin.site.register(Cart), admin.site.register(User)
/projectback/api/migrations/0002_auto_20210508_0140.py
# Generated by Django 3.2.2 on 2021-05-07 19:40 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('api', '0001_initial'), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=99)), ], ), migrations.CreateModel( name='User', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('username', models.CharField(max_length=20)), ('password', models.CharField(max_length=2222)), ], ), migrations.RemoveField( model_name='product', name='address', ), migrations.RemoveField( model_name='product', name='city', ), migrations.AddField( model_name='product', name='price', field=models.IntegerField(default=0), ), migrations.CreateModel( name='Cart', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('username', models.CharField(max_length=50)), ('address', models.TextField()), ('book', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.product')), ], ), migrations.AddField( model_name='product', name='category', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.category'), ), ]
/projectback/api/models.py
from django.db import models class Category(models.Model): name = models.CharField(max_length=200) description = models.TextField(max_length=500, default='') def to_json(self): return { 'id': self.id, 'name': self.name, 'description': self.description } class Product(models.Model): name = models.CharField(max_length=200) description = models.TextField(max_length=500, default='') price = models.IntegerField(default=0) category = models.ForeignKey(Category, null=True, on_delete=models.CASCADE, blank=True) def to_json(self): return { 'id': self.id, 'name': self.name, 'description': self.description, 'price': self.price } class Cart(models.Model): username = models.CharField(max_length=50) address = models.TextField() book = models.ForeignKey(Product, null=True, on_delete=models.CASCADE, blank=True) class User(models.Model): username = models.CharField(max_length=20) password = models.CharField(max_length=2222)
/projectback/api/serializers.py
from rest_framework import serializers from api.models import Category, Product, Cart, User class CategoryModelSerializer(serializers.ModelSerializer): class Meta: model = Category fields = ('id', 'name', 'description') class ProductSerializer(serializers.Serializer): class Meta: model = Product fields = ('name', 'description', 'price', 'category') class CartSerializer(serializers.Serializer): class Meta: model = Cart fields = ('username', 'address', 'book') class UserModelSerializer(serializers.ModelSerializer): class Meta: model = User fields = ('id', 'username', 'password')
/projectback/api/urls.py
from django.urls import path from api import views from api.views import product_list, product_detail, category_list, product2_list, category2_list urlpatterns = [ path('api/product', product_list), path('api/product/<int:product_id>/', product_detail), path('api/category', category_list), path('api/product2', product2_list), path('api/category2', category2_list), path('api/product-list', views.ProductViewSet.as_view(), name='product-list'), path('product-list/<str:pk>/', views.ProductDetailViewSet.as_view(), name='product-detail') ]
/projectback/api/views.py
from api.models import Product, Category
from django.http.response import JsonResponse
from api.serializers import CategoryModelSerializer, ProductSerializer, CartSerializer, UserModelSerializer
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.views import APIView
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.permissions import AllowAny
from rest_framework.status import (
    HTTP_400_BAD_REQUEST,
    HTTP_404_NOT_FOUND,
    HTTP_200_OK
)


def product_list(request):
    """Return every product as a JSON list (hand-rolled, no serializer)."""
    products = Product.objects.all()
    product_json = [product.to_json() for product in products]
    return JsonResponse(product_json, safe=False)


def product_detail(request, product_id):
    """Return one product by id, or a 400 with the DoesNotExist message."""
    try:
        product = Product.objects.get(id=product_id)
    except Product.DoesNotExist as e:
        return JsonResponse({'message': str(e)}, status=400)
    return JsonResponse(product.to_json())


def category_list(request):
    """Return every category as a JSON list (hand-rolled, no serializer)."""
    categories = Category.objects.all()
    category_json = [category.to_json() for category in categories]
    return JsonResponse(category_json, safe=False)


@api_view(['GET', 'POST'])
def product2_list(request):
    """GET: list products via serializer.  POST: create a product.

    Bug fixes relative to the original:
    * bare ``except:`` clauses narrowed to concrete exception types;
    * the missing-category path reported ``"200"`` although nothing was
      created -- it now reports ``"404"``;
    * ``image=request.data['image']`` removed from ``Product.objects.create``:
      the Product model has no ``image`` field, so every POST raised
      TypeError.
    """
    if request.method == 'GET':
        try:
            products = Product.objects.all()
            serializer = ProductSerializer(products, many=True)
            return JsonResponse(serializer.data, safe=False)
        except Exception:
            return JsonResponse({"status": "500"}, safe=False)
    if request.method == 'POST':
        try:
            category = Category.objects.get(name=request.data['category'])
        except (Category.DoesNotExist, KeyError):
            return JsonResponse({"status": "404"}, safe=False)
        Product.objects.create(
            category=category,
            name=request.data['name'],
            description=request.data['description'],
            price=request.data['price']
        )
        return JsonResponse({"status": "200"}, safe=False)


@api_view(['GET', 'POST'])
def category2_list(request):
    """GET: list categories via serializer.  POST: look one up by name.

    Bug fix: serializing a single Category instance used ``many=True``,
    which raised (Category is not iterable) and the bare except turned every
    POST into ``{"status": "200"}``.  It now serializes the single instance
    and returns its data, or ``"404"`` when the category is missing.
    """
    if request.method == 'GET':
        try:
            categories = Category.objects.all()
            serializer = CategoryModelSerializer(categories, many=True)
            return JsonResponse(serializer.data, safe=False)
        except Exception:
            return JsonResponse({"status": "505"}, safe=False)
    if request.method == 'POST':
        try:
            category = Category.objects.get(name=request.data['category'])
        except (Category.DoesNotExist, KeyError):
            return JsonResponse({'status': '404'}, safe=False)
        serializer = CategoryModelSerializer(category)
        return JsonResponse(serializer.data, safe=False)


class ProductViewSet(APIView):
    """Read-only product listing as a class-based view."""

    @staticmethod
    def get(request):
        queryset = Product.objects.all()
        serializer = ProductSerializer(queryset, many=True)
        return JsonResponse(serializer.data, safe=False)


class ProductDetailViewSet(APIView):
    """Read-only single-product view (pk taken from the URL)."""

    @staticmethod
    def get(request, pk):
        queryset = Product.objects.get(id=pk)
        serializer = ProductSerializer(queryset, many=False)
        return Response(serializer.data)


@csrf_exempt
@api_view(["POST"])
@permission_classes((AllowAny,))
def login(request):
    """Token login: validate credentials, return (or create) the DRF token."""
    username = request.data.get("username")
    password = request.data.get("password")
    if username is None or password is None:
        return Response({'error': 'Please provide both username and password'},
                        status=HTTP_400_BAD_REQUEST)
    user = authenticate(username=username, password=password)
    if not user:
        return Response({'error': 'Invalid Credentials'}, status=HTTP_404_NOT_FOUND)
    token, _ = Token.objects.get_or_create(user=user)
    return Response({'token': token.key}, status=HTTP_200_OK)


@csrf_exempt
@api_view(["GET"])
def sample_api(request):
    """Health-check endpoint returning fixed sample data."""
    data = {'sample_data': 123}
    return Response(data, status=HTTP_200_OK)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
vanya2143/ITEA-tasks
refs/heads/master
{"/hw-6/task_2.py": ["/hw-6/task_1.py"]}
└── ├── hw-1 │ ├── task_1.py │ ├── task_2.py │ └── task_3.py ├── hw-2 │ ├── task_1.py │ └── task_2.py ├── hw-3 │ └── task_1.py ├── hw-4 │ └── task_1.py ├── hw-5 │ └── task_1.py ├── hw-6 │ ├── task_1.py │ └── task_2.py └── hw-7 └── task_1.py
/hw-1/task_1.py
""" 1. Определить количество четных и нечетных чисел в заданном списке. Оформить в виде функции, где на вход будет подаваться список с целыми числами. Результат функции должен быть 2 числа, количество четных и нечетных соответственно. """ def list_check(some_list): even_numb = 0 not_even_numb = 0 for elem in some_list: if elem % 2 == 0: even_numb += 1 else: not_even_numb += 1 return f"even: {even_numb}, not even: {not_even_numb}" if __name__ == '__main__': my_list = list(range(1, 20)) print(list_check(my_list))
/hw-1/task_2.py
""" Написать функцию, которая принимает 2 числа. Функция должна вернуть сумму всех элементов числового ряда между этими двумя числами. (если подать 1 и 5 на вход, то результат должен считаться как 1+2+3+4+5=15) """ def all_numbers_sum(num1, num2): return sum([num for num in range(num1, num2 + 1)]) if __name__ == '__main__': print(all_numbers_sum(1, 5))
/hw-1/task_3.py
""" Реализовать алгоритм бинарного поиска на python. На вход подается упорядоченный список целых чисел, а так же элемент, который необходимо найти и указать его индекс, в противном случае – указать что такого элемента нет в заданном списке. """ def search_item(some_list, find_item): some_list.sort() list_length = len(some_list) start = 0 end = list_length - 1 mid = list_length // 2 i = 0 while i < list_length: if find_item == some_list[mid]: return f'Число {some_list[mid]}, найдено по индексу {mid}' elif find_item > some_list[mid]: start = mid + 1 mid = start + (end - start) // 2 else: end = mid - 1 mid = (end - start) // 2 i += 1 else: return f'Числа {find_item} нету в списке!' if __name__ == '__main__': # my_list = list(range(0, 100)) my_list = [1, 23, 33, 54, 42, 77, 234, 99, 2] my_item = 42 print(search_item(my_list, my_item))
/hw-2/task_1.py
""" 1. Написать функцию, которая будет принимать на вход натуральное число n, и возращать сумму его цифр. Реализовать используя рекурсию (без циклов, без строк, без контейнерных типов данных). Пример: get_sum_of_components(123) -> 6 (1+2+3) """ def get_sum_of_components_two(n): return 0 if not n else n % 10 + get_sum_of_components_two(n // 10) if __name__ == '__main__': print(get_sum_of_components_two(123))
/hw-2/task_2.py
""" 2. Написать декоратор log, который будет выводить на экран все аргументы, которые передаются вызываемой функции. @log def my_sum(*args): return sum(*args) my_sum(1,2,3,1) - выведет "Функция была вызвана с - 1, 2, 3, 1" my_sum(22, 1) - выведет "Функция была вызвана с - 22, 1" """ def log(func): def wrapper(*args): res = func(*args) print("Функция была вызвана с - " + ', '.join(map(str, args))) return res return wrapper @log def my_sum(*args): return if __name__ == '__main__': my_sum(11, 2, 3, 's', 4)
/hw-3/task_1.py
""" Реализовать некий класс Matrix, у которого: 1. Есть собственный конструктор, который принимает в качестве аргумента - список списков, копирует его (то есть при изменении списков, значения в экземпляре класса не должны меняться). Элементы списков гарантированно числа, и не пустые. 2. Метод size без аргументов, который возвращает кортеж вида (число строк, число столбцов). 3. Метод transpose, транспонирующий матрицу и возвращающую результат (данный метод модифицирует экземпляр класса Matrix) 4. На основе пункта 3 сделать метод класса create_transposed, который будет принимать на вход список списков, как и в пункте 1, но при этом создавать сразу транспонированную матрицу. https://ru.wikipedia.org/wiki/%D0%A2%D1%80%D0%B0%D0%BD%D1%81%D0%BF%D0%BE%D0%BD%D0%B8%D1%80%D0% """ class Matrix: def __init__(self, some_list): self.data_list = some_list.copy() def size(self): row = len(self.data_list) col = len(self.data_list[0]) return row, col def transpose(self): t_matrix = [ [item[i] for item in self.data_list] for i in range(self.size()[1]) ] self.data_list = t_matrix return self.data_list @classmethod def create_transposed(cls, int_list): obj = cls(int_list) obj.transpose() return obj if __name__ == '__main__': my_list = [[1, 2, 9], [3, 4, 0], [5, 6, 4]] t = Matrix(my_list) t.transpose() print(t.data_list) t2 = Matrix.create_transposed(my_list) print(t2.data_list)
/hw-4/task_1.py
""" К реализованному классу Matrix в Домашнем задании 3 добавить следующее: 1. __add__ принимающий второй экземпляр класса Matrix и возвращающий сумму матриц, если передалась на вход матрица другого размера - поднимать исключение MatrixSizeError (по желанию реализовать так, чтобы текст ошибки содержал размерность 1 и 2 матриц - пример: "Matrixes have different sizes - Matrix(x1, y1) and Matrix(x2, y2)") 2. __mul__ принимающий число типа int или float и возвращающий матрицу, умноженную на скаляр 3. __str__ переводящий матрицу в строку. Столбцы разделены между собой табуляцией, а строки — переносами строк (символ новой строки). При этом после каждой строки не должно быть символа табуляции и в конце не должно быть переноса строки. """ class MatrixSizeError(Exception): pass class Matrix: def __init__(self, some_list): self.data_list = some_list.copy() def __add__(self, other): if self.size() != other.size(): raise MatrixSizeError( f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}' ) return [ [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])] for row in range(self.size()[0]) ] def __mul__(self, other): return [[item * other for item in row] for row in self.data_list] def __str__(self): return ''.join('%s\n' % '\t'.join(map(str, x)) for x in self.data_list).rstrip('\n') def size(self): row = len(self.data_list) col = len(self.data_list[0]) return row, col def transpose(self): t_matrix = [ [item[i] for item in self.data_list] for i in range(self.size()[1]) ] self.data_list = t_matrix return self.data_list @classmethod def create_transposed(cls, int_list): obj = cls(int_list) obj.transpose() return obj if __name__ == '__main__': list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]] list_2 = [[2, 3, 0], [1, 2, 3], [5, 6, 4]] list_3 = [[2, 3], [1, 2], [5, 6]] t1 = Matrix(list_1) t1.transpose() t2 = Matrix.create_transposed(list_2) t3 = Matrix(list_3) print("t1: ", t1.data_list) print("t2: ", t2.data_list) print("t3: ", 
t3.data_list) # __add__ print("\nt1.__add__(t2) : ", t1 + t2) try: print("\nПробую: t1 + t3") print(t1 + t3) except MatrixSizeError: print('Тут было вызвано исключение MatrixSizeError') # __mul__ print("\nt2.__mul__(3): \n", t2 * 3) # __str__ print('\nt1.__str__') print(t1)
/hw-5/task_1.py
# Реализовать пример использования паттерна Singleton from random import choice # Генератор событий def gen_events(instance, data, count=2): for i in range(count): event = choice(data) instance.add_event(f'Event-{event}-{i}', event) # Singleton на примере списка событий class EventsMeta(type): _instance = None def __call__(cls): if cls._instance is None: cls._instance = super().__call__() return cls._instance class Events(metaclass=EventsMeta): # __metaclass__ = EventsMeta _events = { 'ok': [], 'info': [], 'warn': [], 'error': [] } def get_all_events(self): """ :return: dict with all events and types """ return self._events def get_events_count(self, key: str = None): """ :param key: if need count of specific type :return: all events count or specific event count if param key: not None :rtype: tuple, int """ if key: try: return len(self._events[key]) # return key, len(self._events[key]) except KeyError: print('Тип события должен быть ' + ', '.join(self._events.keys())) return return tuple((event, len(self._events[event])) for event in self._events.keys()) def add_event(self, event: str, event_type: str): """ :param event: event message :param event_type: ok, info, warn, error :return: None """ try: self._events[event_type].append(event) except KeyError: print('Тип события должен быть ' + ', '.join(self._events.keys())) def read_event(self, event_type: str): """ :param event_type: ok, info, warn, error :return: tuple last item of event_type, all count events or None """ try: return self._events[event_type].pop(), len(self._events[event_type]) except IndexError: print('Событий больше нет') return except KeyError: print('Указан неверный тип события') return @classmethod def get_events_types(cls): return cls._events.keys() if __name__ == '__main__': event_instance1 = Events() event_instance2 = Events() event_instance3 = Events() print(type(event_instance1), id(event_instance1)) print(type(event_instance2), id(event_instance2)) # Генерируем события 
gen_events(event_instance3, list(event_instance3.get_events_types()), 50) # Получаем все события print(event_instance2.get_all_events()) # Получаем колличества всех типов событий и обределенного типа print(event_instance3.get_events_count()) print(f"Error: {event_instance3.get_events_count('error')}") # Читаем события while event_instance3.get_events_count('ok'): print(event_instance3.read_event('ok'))
/hw-6/task_1.py
""" 1. Реализовать подсчёт елементов в классе Matrix с помощью collections.Counter. Можно реализовать протоколом итератора и тогда будет такой вызов - Counter(maxtrix). Либо сделать какой-то метод get_counter(), который будет возвращать объект Counter и подсчитывать все элементы внутри матрицы. Какой метод - ваш выбор. """ from collections import Counter class MatrixSizeError(Exception): pass class Matrix: def __init__(self, some_list): self.data_list = some_list.copy() self.counter = Counter def __add__(self, other): if self.size() != other.size(): raise MatrixSizeError( f'Matrixes have different sizes - Matrix{self.size()} and Matrix{other.size()}' ) return [ [self.data_list[row][col] + other.data_list[row][col] for col in range(self.size()[1])] for row in range(self.size()[0]) ] def __mul__(self, other): return [[item * other for item in row] for row in self.data_list] def __str__(self): return ''.join('%s\n' % '\t'.join(map(str, x)) for x in self.data_list).rstrip('\n') def get_counter(self): return self.counter(elem for list_elem in self.data_list for elem in list_elem) def size(self): row = len(self.data_list) col = len(self.data_list[0]) return row, col def transpose(self): t_matrix = [ [item[i] for item in self.data_list] for i in range(self.size()[1]) ] self.data_list = t_matrix return self.data_list @classmethod def create_transposed(cls, int_list): obj = cls(int_list) obj.transpose() return obj if __name__ == '__main__': list_1 = [[1, 2, 9], [3, 4, 0], [5, 6, 4]] list_2 = [[2, 3], [1, 2], [5, 6]] matrix1 = Matrix(list_1) matrix2 = Matrix(list_2) print(matrix1.get_counter()) print(matrix2.get_counter())
/hw-6/task_2.py
# 2. Using the unittest module, write tests for: addition of two matrices,
#    multiplication of a matrix by a scalar, and the transpose method.
import unittest

# BUG FIX: a bare relative import breaks direct execution
# (``python task_2.py`` has no parent package, so ``from .task_1 import …``
# raises ImportError despite the ``unittest.main()`` guard below).
# Fall back to an absolute import when run as a script.
try:
    from .task_1 import Matrix, MatrixSizeError
except ImportError:
    from task_1 import Matrix, MatrixSizeError


class TestMatrix(unittest.TestCase):
    """Unit tests for Matrix addition, scalar multiplication and transpose."""

    def setUp(self) -> None:
        self.matrix_1 = Matrix([[1, 2, 9], [3, 4, 0], [5, 6, 4]])
        self.matrix_2 = Matrix([[2, 3, 0], [1, 2, 3], [5, 6, 4]])
        self.matrix_3 = Matrix([[2, 9], [4, 0], [6, 4]])
        self.matrix_4 = Matrix([[2, 9], [4, 0], [6, 4]])

    def test_add_three(self):
        self.assertEqual(self.matrix_1 + self.matrix_2, [[3, 5, 9], [4, 6, 3], [10, 12, 8]])

    def test_add_two_size(self):
        self.assertEqual(self.matrix_3 + self.matrix_4, [[4, 18], [8, 0], [12, 8]])

    def test_add_error(self):
        # Adding matrices of incompatible sizes must raise MatrixSizeError.
        with self.assertRaises(MatrixSizeError):
            self.matrix_1 + self.matrix_3

    def test_mul_integer(self):
        self.assertEqual(self.matrix_1 * 2, [[2, 4, 18], [6, 8, 0], [10, 12, 8]])

    def test_mul_float(self):
        self.assertEqual(self.matrix_1 * 2.5, [[2.5, 5.0, 22.5], [7.5, 10.0, 0.0], [12.5, 15.0, 10.0]])

    def test_transpose_and_transpose_over_transposed_instance(self):
        # Transposing twice must restore the original layout.
        self.assertEqual(self.matrix_1.transpose(), [[1, 3, 5], [2, 4, 6], [9, 0, 4]])
        self.assertEqual(self.matrix_1.transpose(), [[1, 2, 9], [3, 4, 0], [5, 6, 4]])


if __name__ == '__main__':
    unittest.main()
/hw-7/task_1.py
""" Сделать скрипт, который будет делать GET запросы на следующие ресурсы: "http://docs.python-requests.org/", "https://httpbin.org/get", "https://httpbin.org/", "https://api.github.com/", "https://example.com/", "https://www.python.org/", "https://www.google.com.ua/", "https://regex101.com/", "https://docs.python.org/3/this-url-will-404.html", "https://www.nytimes.com/guides/", "https://www.mediamatters.org/", "https://1.1.1.1/", "https://www.politico.com/tipsheets/morning-money", "https://www.bloomberg.com/markets/economics", "https://www.ietf.org/rfc/rfc2616.txt" Для каждого запроса должен быть вывод по примеру: "Resource 'google.com.ua', request took 0.23 sec, response status - 200." В реализации нет ограничений - можно использовать процессы, потоки, асинхронность. Любые вспомагательные механизмы типа Lock, Semaphore, пулы для тредов и потоков. """ import aiohttp import asyncio from time import time async def get_response(session, url): async with session.get(url) as resp: return resp.status async def request(url): async with aiohttp.ClientSession() as session: time_start = time() status_code = await get_response(session, url) print(f"Resource '{url}', request took {time() - time_start:.3f}, response status - {status_code}") if __name__ == '__main__': urls = [ "http://docs.python-requests.org/", "https://httpbin.org/get", "https://httpbin.org/", "https://api.github.com/", "https://example.com/", "https://www.python.org/", "https://www.google.com.ua/", "https://regex101.com/", "https://docs.python.org/3/this-url-will-404.html", "https://www.nytimes.com/guides/", "https://www.mediamatters.org/", "https://1.1.1.1/", "https://www.politico.com/tipsheets/morning-money", "https://www.bloomberg.com/markets/economics", "https://www.ietf.org/rfc/rfc2616.txt" ] futures = [request(url) for url in urls] loop = asyncio.get_event_loop() t_start = time() loop.run_until_complete(asyncio.wait(futures)) t_end = time() print(f"Full fetching got {t_end - t_start:.3f} seconds.")
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
xmlabs-io/xmlabs-python
refs/heads/master
{"/xmlabs/aws_lambda/handler.py": ["/xmlabs/aws_lambda/env.py", "/xmlabs/aws_lambda/config.py"], "/tests/test_aws_lambda_integration.py": ["/xmlabs/aws_lambda/handler.py"], "/xmlabs/__init__.py": ["/xmlabs/aws_lambda/handler.py"], "/xmlabs/aws_lambda/__init__.py": ["/xmlabs/aws_lambda/handler.py"]}
└── ├── example │ └── aws_lambda │ └── app.py ├── tests │ ├── test_aws_lambda_integration.py │ ├── test_aws_lambda_settings.py │ └── test_dynaconf.py └── xmlabs ├── __init__.py ├── aws_lambda │ ├── __init__.py │ ├── config.py │ ├── env.py │ └── handler.py └── dynaconf ├── aws_ec2_userdata_loader.py └── aws_ssm_loader.py
/example/aws_lambda/app.py
# BUG FIX: the package exports ``xmlabs_lambda_handler`` (see
# xmlabs/aws_lambda/__init__.py); there is no ``lambda_handler`` name,
# so the original ``from xmlabs.aws_lambda import lambda_handler`` failed.
from xmlabs.aws_lambda import xmlabs_lambda_handler


@xmlabs_lambda_handler
def main(event, context, config):
    """Example handler: the decorator resolves the environment from *event*
    and injects the matching settings as the ``config`` keyword argument."""
    print(config.STRIPE_API_SECRET_KEY)


if __name__ == "__main__":
    # Exercise both environments; repeated calls hit the same settings cache.
    main({"headers": {"X-Environment": "dev"}}, {})
    main({"headers": {"X-Environment": "prod"}}, {})
    main({"headers": {"X-Environment": "dev"}}, {})
    main({"headers": {"X-Environment": "dev"}}, {})
    main({"headers": {"X-Environment": "prod"}}, {})
/tests/test_aws_lambda_integration.py
import pytest

from xmlabs import xmlabs_lambda_handler


# Decorated handler: the decorator resolves the environment from *event*,
# loads the matching settings and injects them as the ``config`` kwarg.
@xmlabs_lambda_handler
def lambda_handler(event, context, config):
    # The decorator must always supply a truthy config object.
    assert(config)


def test_lambda_handler():
    # Empty event/context: environment detection falls back to its default.
    lambda_handler({},{})
/tests/test_aws_lambda_settings.py
import pytest

from xmlabs.aws_lambda.config import settings


def test_xmlabs_aws_lambda_config():
    """Importing the config module must yield a truthy settings object."""
    assert settings
/tests/test_dynaconf.py
from dynaconf import Dynaconf


def test_dynaconf_settingsenv():
    """A Dynaconf instance with layered environments enabled must construct."""
    settingsenv = Dynaconf(environments=True)
    assert settingsenv


def test_dynaconf_settings():
    """A plain Dynaconf instance must construct."""
    settings = Dynaconf()
    assert settings
/xmlabs/__init__.py
# Public package API: re-export the Lambda handler decorator at top level.
from .aws_lambda import xmlabs_lambda_handler
/xmlabs/aws_lambda/__init__.py
# Re-export the handler decorator as the subpackage's public API.
from .handler import xmlabs_lambda_handler
/xmlabs/aws_lambda/config.py
from dynaconf import Dynaconf
from dynaconf.constants import DEFAULT_SETTINGS_FILES

# Loader order matters: loaders run in sequence and later ones win.
LOADERS_FOR_DYNACONF = [
    'dynaconf.loaders.env_loader',  # In order to configure AWS_SSM_PREFIX we need to load it from the environment first
    'xmlabs.dynaconf.aws_ssm_loader',
    'dynaconf.loaders.env_loader',  # Good to load the environment last so that it takes precedence over other config
]

# Environments the settings object can be switched between.
ENVIRONMENTS= ['prod','dev','stage']

settings = Dynaconf(
    #settings_files=['settings.toml', '.secrets.toml'],
    warn_dynaconf_global_settings = True,
    load_dotenv = True,
    default_settings_paths = DEFAULT_SETTINGS_FILES,
    loaders = LOADERS_FOR_DYNACONF,
    envvar_prefix= "APP",  # only APP_* environment variables are consumed
    env_switcher = "APP_ENV",
    env='dev',  # default environment when no switcher variable is set
    environments=ENVIRONMENTS,
    #environments=True,
)


def xmlabs_settings(env):
    """Return the settings object scoped to *env* (e.g. 'dev', 'prod', 'stage')."""
    return settings.from_env(env)
/xmlabs/aws_lambda/env.py
import os
import logging

logger = logging.getLogger()


def get_environment(event, context=None):
    """Resolve the deployment environment for a Lambda invocation.

    Resolution order:
      1. ``ENV`` process variable — hard override, returned as-is (not lowered).
      2. ``X-Environment`` request header.
      3. Lambda function ARN alias (preferred) or a ``_<env>`` function-name
         suffix, when the alias/suffix is one of the valid environments.
      4. API Gateway stage variable ``env``, then the request stage name.
      5. ``APP_ENV`` / ``DEFAULT_ENV`` process variables (default ``"dev"``),
         used only when invoked without an alias.

    :param event: Lambda event dict (headers / stageVariables / requestContext).
    :param context: Lambda context; only ``invoked_function_arn`` is read.
    :raises Exception: if no environment could be determined.
    """
    valid_envs = ["stage", "prod", "dev"]
    env = None
    default_env = os.getenv("APP_ENV", os.getenv("DEFAULT_ENV", "dev"))

    # 1. Hard override via the ENV process variable.
    override_env = os.getenv("ENV")
    if override_env:
        logger.info("Overriding Environment with {}".format(override_env))
        return override_env

    # 2. X-Environment request header (override).
    if event.get('headers') and event['headers'].get("X-Environment"):
        return event['headers']['X-Environment'].lower()

    # 3. Lambda function ARN.  BUG FIX: catch only AttributeError (context is
    # None or has no ARN, e.g. local invocation) instead of a blanket
    # ``except Exception``.
    try:
        split_arn = context.invoked_function_arn.split(':')
    except AttributeError:
        split_arn = None

    if split_arn:
        # ARN alias (preferred): arn:aws:lambda:region:acct:function:name:alias
        alias = split_arn[-1]
        if alias in valid_envs:
            return alias.lower()

        # Function-name suffix, e.g. "my_function_prod".
        split_fn = split_arn[6].split("_")
        if split_fn[-1].lower() in valid_envs:
            return split_fn[-1].lower()

    # 4. API Gateway stage variable, then stage name.
    api_stage_variable = None
    if event.get("stageVariables"):
        api_stage_variable = event["stageVariables"].get("env")
        env = api_stage_variable

    api_stage = None
    if event.get("requestContext"):
        api_stage = event["requestContext"].get("stage")
    if not env:
        env = api_stage

    if api_stage and api_stage_variable and api_stage != api_stage_variable:
        logger.warning("Tentrr: Using different api GW stagename and api Stage Variable is not recommended")

    if env:
        return env.lower()

    # 5. Invoked without an alias (no ARN, or a 7-part ARN): default env.
    if (not split_arn or len(split_arn) == 7) and default_env:
        return default_env
    # BUG FIX: the original had an unreachable ``return None`` after this
    # raise and the message string was broken across lines.
    raise Exception("Environment could not be determined")
/xmlabs/aws_lambda/handler.py
from functools import wraps

from .config import xmlabs_settings
from .env import get_environment


def xmlabs_lambda_handler(fn):
    """Decorator that resolves the environment and injects settings.

    The wrapped handler is invoked as ``fn(event, context, config=config)``,
    where *config* is the settings object scoped to the detected environment.
    Failures in environment detection or settings retrieval propagate to the
    caller unchanged.
    """
    @wraps(fn)
    def wrapped(*args, **kwargs):
        # Resolve the target environment from the invocation event/context.
        # TODO: log to CloudWatch when environment detection fails.
        env = get_environment(*args, **kwargs)
        if not env:
            raise Exception("No Environment detected")

        # Load the env-scoped configuration.
        # TODO: log to CloudWatch when settings retrieval fails.
        config = xmlabs_settings(env)
        if not config:
            raise Exception("No Configuration found")

        # TODO: standard invoke logging and a standard error log to
        # CloudWatch around the handler call.
        return fn(*args, **kwargs, config=config)

    return wrapped
/xmlabs/dynaconf/aws_ec2_userdata_loader.py
from .base import ConfigSource  # NOTE(review): no ``base`` module is visible in this package — confirm it exists
import logging
import requests

logger = logging.getLogger()


class ConfigSourceAwsEc2UserData(ConfigSource):
    """Config source intended to read EC2 user-data when running on EC2."""

    def load(self):
        # Only attempt a fetch when we detect we are on an EC2 instance;
        # the fetch itself is not implemented yet.
        if self._running_in_ec2():
            #TODO: fetch EC2 USERDATA
            raise Exception("ConfigSourceEC2UserData Load Unimplemented")

    def _running_in_ec2(self):
        # Probe the EC2 instance metadata endpoint; the very short timeout
        # keeps the check fast off-EC2, where the link-local address is
        # unroutable and the request fails immediately.
        try:
            # Based on https://gist.github.com/dryan/8271687
            instance_ip_url = "http://169.254.169.254/latest/meta-data/local-ipv4"
            requests.get(instance_ip_url, timeout=0.01)
            return True
        except requests.exceptions.RequestException:
            return False
/xmlabs/dynaconf/aws_ssm_loader.py
import boto3
import logging
import requests
from functools import lru_cache

from dynaconf.utils.parse_conf import parse_conf_data

logger = logging.getLogger()

IDENTIFIER = 'aws_ssm'


def load(obj, env=None, silent=True, key=None, filename=None):
    """Read and load into *obj* a single key or all keys from AWS SSM.

    :param obj: the settings instance
    :param env: settings current env (upper case) default='DEVELOPMENT'
    :param silent: if errors should raise
    :param key: if defined load a single key, else load all from `env`
    :param filename: Custom filename to load (useful for tests)
    :return: None
    """
    # Parameters live under "/<prefix>/<env>/" (or ".../<key>/" for one key).
    prefix = ""
    if obj.get("AWS_SSM_PREFIX"):
        prefix = "/{}".format(obj.AWS_SSM_PREFIX)
    path = "{}/{}/".format(prefix, env.lower())
    if key:
        path = "{}{}/".format(path, key)

    data = _read_aws_ssm_parameters(path)
    try:
        if data and key:
            value = parse_conf_data(data.get(key), tomlfy=True, box_settings=obj)
            if value:
                obj.set(key, value)
        elif data:
            obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
    except Exception:
        if silent:
            return False
        raise


@lru_cache
def _read_aws_ssm_parameters(path):
    """Fetch all parameters under *path*, following pagination, with decryption.

    Returns a dict mapping flattened names ("/app/dev/db/host" -> "db_host")
    to values.  NOTE: results are memoized per path for the process lifetime,
    including the empty dict produced by a failed read.
    """
    logger.debug(
        "Reading settings AWS SSM Parameter Store (Path = {}).".format(path)
    )
    result = {}
    try:
        ssm = boto3.client("ssm")
        response = ssm.get_parameters_by_path(
            Path=path, Recursive=True, WithDecryption=True
        )
        while True:
            for param in response["Parameters"]:
                name = param["Name"].replace(path, "").replace("/", "_")
                result[name] = param["Value"]
            if "NextToken" in response:
                response = ssm.get_parameters_by_path(
                    Path=path,
                    Recursive=True,
                    WithDecryption=True,
                    NextToken=response["NextToken"],
                )
            else:
                break
    except Exception as ex:
        # BUG FIX: report through logging instead of bare print() (the
        # original message string was also broken across lines); a failed
        # read yields an empty result.
        logger.error(
            "Trying to read aws ssm parameters (for {}): {}!".format(path, str(ex))
        )
        result = {}
    logger.debug("Read {} parameters.".format(len(result)))
    return result
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
acheng6845/PuzzleSolver
refs/heads/master
{"/PADScreen.py": ["/Board_Screen.py", "/PAD_Monster.py", "/PAD_Team.py", "/Calculator_Screen.py"], "/Board_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/Calculator_Screen.py": ["/PAD_Monster.py", "/PAD_Team.py"], "/PAD_Team.py": ["/PAD_Monster.py"], "/PAD_GUI.py": ["/PADScreen.py"]}
└── ├── Board_Screen.py ├── Calculator_Screen.py ├── PADCompleter.py ├── PADScreen.py ├── PAD_GUI.py ├── PAD_Monster.py ├── PAD_Team.py └── image_updater.py
/Board_Screen.py
__author__ = 'Aaron'

import os
from functools import partial

from PyQt5.QtWidgets import (QVBoxLayout, QWidget, QLabel, QGridLayout,
                             QSplitter, QPushButton, QHBoxLayout)
from PyQt5.QtCore import Qt, QMimeData
from PyQt5.QtGui import QPixmap, QDrag

from PAD_Monster import PADMonster
from PAD_Team import PADTeam


class BoardScreen(QVBoxLayout):
    """Layout holding the damage read-out, team portraits and the orb board."""

    # NOTE: class-level defaults are shared between all BoardScreen instances.
    default_team = [PADMonster() for monster in range(6)]
    default_team_totals = PADTeam(default_team)

    def __init__(self, gui, team=default_team, team_totals=default_team_totals):
        super().__init__()
        self.team = team
        self.team_totals = team_totals
        self.damage_array = [[{'main attribute': 0, 'sub attribute': 0}
                              for col in range(2)] for row in range(6)]
        self.__init__screen__(gui, self.team, self.team_totals)

    def __init__screen__(self, gui, team, team_totals):
        """Build all child widgets: damage grid, recovery label, board, button."""
        # DAMAGE SCREEN: 6 monsters x (main, sub) damage labels.
        damage_screen = QWidget()
        damage_screen_layout = QGridLayout()
        damage_screen.setLayout(damage_screen_layout)
        self.addWidget(damage_screen)
        self.damage_labels = [[QLabel(gui) for column in range(2)] for row in range(6)]
        for row in range(6):
            for column in range(2):
                damage_screen_layout.addWidget(self.damage_labels[row][column], row, column)

        # RECOVERY LABEL
        self.hp_recovered = QLabel(gui)
        self.addWidget(self.hp_recovered)

        # BOARD container
        board = QWidget()
        board_layout = QGridLayout()
        board.setLayout(board_layout)
        self.addWidget(board)

        # TEAM IMAGES (row 0 of the board grid)
        self.team_labels = []
        for index in range(6):
            label = QLabel(gui)
            self.team_labels.append(label)
            board_layout.addWidget(label, 0, index)
            board_layout.setAlignment(label, Qt.AlignHCenter)
        self.set__team(team)

        # BOARD CELLS: an 8x8 grid of clickable labels, checkerboard colored.
        # IDIOM FIX: enumerate instead of list.index() inside the loop.
        self.board_labels = [[PADLabel(gui) for column in range(8)] for row in range(8)]
        light_brown = 'rgb(120, 73, 4)'
        dark_brown = 'rgb(54, 35, 7)'
        color = dark_brown
        for row_index, label_row in enumerate(self.board_labels):
            for col_index, cell in enumerate(label_row):
                cell.setStyleSheet("QLabel { background-color: %s }" % color)
                # Alternate colors within a row; keep the same color when
                # wrapping to the next row so rows are offset checker-style.
                if color == dark_brown and (col_index + 1) % 8 != 0:
                    color = light_brown
                elif color == light_brown and (col_index + 1) % 8 != 0:
                    color = dark_brown
                board_layout.addWidget(cell, row_index + 1, col_index)
        for row in range(9):
            board_layout.setRowStretch(row, 1)
        for column in range(8):
            board_layout.setColumnStretch(column, 1)
        self.board_array = []
        self.__create__board___(5, 6)

        # CALCULATE DAMAGE BUTTON
        calculate_damage_button = QPushButton('Calculate Damage', gui)
        calculate_damage_button.clicked.connect(partial(self.calculate_damage, team, team_totals))
        self.addWidget(calculate_damage_button)

    def __create__board___(self, row, column):
        """Show a row x column sub-board; hide every other cell."""
        self.board_array = [['' for c in range(column)] for r in range(row)]
        for label_row in self.board_labels:
            for label in label_row:
                label.hide()
        for x in range(row):
            for y in range(column):
                self.board_labels[x][y].show()

    def calculate_damage(self, team=default_team, team_totals=default_team_totals):
        """Scan the board for combos and update the damage/recovery labels."""
        # Snapshot the current element of every visible cell.
        for row in range(len(self.board_array)):
            for column in range(len(self.board_array[0])):
                self.board_array[row][column] = self.board_labels[row][column].element
        all_positions = set()
        # Per-monster damage contribution keyed by orb element.
        elemental_damage = [{'fire': 0, 'water': 0, 'wood': 0, 'light': 0, 'dark': 0}
                            for monster in range(6)]
        total_hp_recovered = 0
        combo_count = 0
        colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'pink']
        attribute_translator = ['fire', 'water', 'wood', 'light', 'dark', 'heart']
        for row in range(len(self.board_array)):
            for column in range(len(self.board_array[0])):
                combo_length, positions = self.__find__combos__recursively__(self.board_array, row, column)
                # Count each combo once: skip if its first position was
                # already claimed by an earlier match, or the cell is empty.
                if combo_length >= 3 and not next(iter(positions)) in all_positions and self.board_array[row][column]:
                    print(str(self.board_array[row][column]) + ":", combo_length, 'orb combo.')
                    attribute = attribute_translator.index(self.board_array[row][column])
                    if attribute != 5:
                        for monster in range(6):
                            if combo_length == 4:
                                # A 4-orb match triggers the two-pronged bonus.
                                damage = team[monster].pronged_atk[attribute] * 1.25
                            else:
                                damage = team[monster].atk[attribute] * (1 + 0.25 * (combo_length - 3))
                            elemental_damage[monster][self.board_array[row][column]] += damage
                    else:
                        # Heart orbs recover HP instead of dealing damage.
                        total_rcv = 0
                        for monster in range(6):
                            total_rcv += team[monster].rcv
                        total_hp_recovered += total_rcv * (1 + 0.25 * (combo_length - 3))
                        print(total_hp_recovered)
                        print(total_rcv)
                    all_positions |= positions
                    combo_count += 1
        combo_multiplier = 1 + 0.25 * (combo_count - 1)
        for monster in range(6):
            main_attribute = attribute_translator[team[monster].attr_main]
            sub_attribute = ''
            if team[monster].attr_sub or team[monster].attr_sub == 0:
                sub_attribute = attribute_translator[team[monster].attr_sub]
            if sub_attribute:
                if main_attribute != sub_attribute:
                    main_damage = elemental_damage[monster][main_attribute] * combo_multiplier
                    sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier
                else:
                    # Same attribute twice: sub contributes 1/11 of the total.
                    main_damage = elemental_damage[monster][main_attribute] * combo_multiplier * (10 / 11)
                    sub_damage = elemental_damage[monster][sub_attribute] * combo_multiplier * (1 / 11)
            else:
                main_damage = elemental_damage[monster][main_attribute] * combo_multiplier
                sub_damage = 0
            self.damage_labels[monster][0].setText(str(main_damage))
            self.damage_labels[monster][0].setStyleSheet("QLabel { color : %s }" % colors[team[monster].attr_main])
            self.damage_labels[monster][1].setText(str(sub_damage))
            if team[monster].attr_sub or team[monster].attr_sub == 0:
                self.damage_labels[monster][1].setStyleSheet("QLabel { color : %s }" % colors[team[monster].attr_sub])
        total_hp_recovered *= combo_multiplier
        self.hp_recovered.setText(str(total_hp_recovered))
        self.hp_recovered.setStyleSheet("QLabel { color : %s }" % colors[5])

    def set__team(self, team):
        """Set each team label's pixmap from images/<name>.png, scaled 75x75."""
        for label, member in zip(self.team_labels, team):
            try:
                image = QPixmap(os.path.join('images', member.name + '.png'))
                # BUG FIX: QPixmap.scaled() returns a new pixmap; the original
                # discarded the scaled result.
                image = image.scaled(75, 75)
                label.setPixmap(image)
            except Exception:
                # Best effort: a missing image simply leaves the label empty.
                pass

    def __find__combos__recursively__(self, array, row, column):
        """Return (combo length, set of positions) for the match at (row, column)."""
        combo_length = 0
        positions = set()
        row_length = self.checkIndexInRow(array, row, column)
        if row_length >= 3:
            # Extend the match from the far end of the horizontal run.
            more_length, more_positions = self.__find__combos__recursively__(array, row, column + row_length - 1)
            combo_length += row_length + more_length - 1
            positions |= more_positions
            for col_index in range(row_length):
                positions.add((row, column + col_index))
        column_length = self.checkIndexInColumn(array, row, column)
        if column_length >= 3:
            # Extend the match from the far end of the vertical run.
            more_length, more_positions = self.__find__combos__recursively__(array, row + column_length - 1, column)
            combo_length += column_length + more_length - 1
            positions |= more_positions
            for row_index in range(column_length):
                positions.add((row + row_index, column))
        if row_length >= 3 and column_length >= 3:
            # The shared corner orb was counted twice.
            return combo_length - 1, positions
        elif row_length < 3 and column_length < 3:
            return 1, positions
        return combo_length, positions

    def checkIndexInRow(self, array, row, col_index):
        """Length of the run of identical orbs starting rightward at col_index."""
        combo_length = 0
        if array[row].count(array[row][col_index]) >= 3:
            # Only start a run at its left edge (or at column 0).
            if col_index > 0:
                if array[row][col_index - 1] != array[row][col_index]:
                    combo_length += self.recurseThroughRow(array, row, col_index)
            else:
                combo_length += self.recurseThroughRow(array, row, col_index)
        return combo_length

    def recurseThroughRow(self, array, row, col_index, count=1):
        """Count consecutive identical orbs to the right of col_index."""
        if array[row][col_index + count] == array[row][col_index]:
            count += 1
            if col_index + count < len(array[row]):
                return self.recurseThroughRow(array, row, col_index, count)
            else:
                return count
        else:
            return count

    def checkIndexInColumn(self, array, row_index, col):
        """Length of the run of identical orbs starting downward at row_index."""
        elements_in_column = []
        combo_length = 0
        for index in range(row_index, len(array)):
            elements_in_column.append(array[index][col])
        if elements_in_column.count(array[row_index][col]) >= 3:
            # Only start a run at its top edge (or at row 0).
            if row_index > 0:
                if array[row_index][col] != array[row_index - 1][col]:
                    combo_length += self.recurseThroughCol(array, row_index, col)
            else:
                combo_length += self.recurseThroughCol(array, row_index, col)
        return combo_length

    def recurseThroughCol(self, array, row_index, col, count=1):
        """Count consecutive identical orbs below row_index."""
        if array[row_index + count][col] == array[row_index][col]:
            count += 1
            if row_index + count < len(array):
                return self.recurseThroughCol(array, row_index, col, count)
            else:
                return count
        else:
            return count


class PADLabel(QLabel):
    """A board cell; left-clicking cycles through the six orb types."""

    def __init__(self, gui):
        super().__init__(gui)
        self.setAcceptDrops(True)
        self.setMouseTracking(True)
        self.setScaledContents(True)
        self.color_counter = -1
        self.colors = ['fire', 'water', 'wood', 'light', 'dark', 'heart']
        self.element = ''
        self.setFixedSize(75, 75)

    def mousePressEvent(self, click):
        if click.button() == Qt.LeftButton and self.rect().contains(click.pos()):
            # Cycle to the next orb type, wrapping after the last one.
            if self.color_counter != 5:
                self.color_counter += 1
            else:
                self.color_counter = 0
            self.element = self.colors[self.color_counter]
            icon = QPixmap(os.path.join('icons', self.element + '.png'))
            # BUG FIX: keep the scaled copy (scaled() does not mutate in place).
            icon = icon.scaled(75, 75)
            self.setPixmap(icon)

    def dragEnterEvent(self, event):
        if event.mimeData().hasImage():
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        # BUG FIX: the original used C++ template syntax
        # (``imageData().value<QImage>()``), which Python parses as a chained
        # comparison and fails at runtime.  In PyQt5, imageData() already
        # returns a QImage, which we convert to a pixmap.
        self.setPixmap(QPixmap.fromImage(event.mimeData().imageData()))


class PADIcon(QLabel):
    """A draggable orb icon: dragging it carries its pixmap as image data."""

    def __init__(self, gui):
        super().__init__()
        self.gui = gui
        self.setMouseTracking(True)
        self.location = self.rect()

    def mousePressEvent(self, click):
        if click.button() == Qt.LeftButton and self.rect().contains(click.pos()):
            print('On it!')
            drag = QDrag(self.gui)
            mimeData = QMimeData()
            mimeData.setImageData(self.pixmap().toImage())
            drag.setMimeData(mimeData)
            drag.setPixmap(self.pixmap())
            dropAction = drag.exec()
/Calculator_Screen.py
__author__ = 'Aaron' # Class Description: # Create framework for the split screens used in PAD_GUI # import necessary files import os import json from functools import partial from PyQt5.QtWidgets import (QLabel, QWidget, QHBoxLayout, QFrame, QSplitter, QStyleFactory, QGridLayout, QLineEdit, QPushButton, QVBoxLayout, QCompleter, QComboBox, QScrollArea, QToolTip) from PyQt5.QtGui import QPixmap, QColor, QFont from PyQt5.QtCore import Qt, QStringListModel from PAD_Monster import PADMonster from PAD_Team import PADTeam class CalculatorScreen(QHBoxLayout): def __init__(self, gui): super().__init__() # 0 = lead1, 1 = sub1,..., 5 = lead2 self.team = [PADMonster() for x in range(6)] self.pad_team = PADTeam(self.team) # keeps old team stats before modification from leader multipliers self.team_base = [PADMonster() for x in range(6)] # open monsters.txt and load it into a python object using json # self.json_file = requests.get('https://padherder.com/api/monsters') self.json_file = open(os.path.join('.\monsters.txt'), 'r') self.json_monsters = json.loads(self.json_file.read()) # print(self.json_monsters[0]["name"]) self.completer_string_list_model = QStringListModel() array_of_monster_names = [] for x in range(len(self.json_monsters)): array_of_monster_names.append(self.json_monsters[x]["name"]) self.completer_string_list_model.setStringList(array_of_monster_names) # checks if the modified button has been pressed so other functions can know which stat to display self.is_pressed = False QToolTip.setFont(QFont('SansSerif', 10)) self.init_screen(gui) def init_screen(self, gui): # add things to top of the screen here (Monitor section)! 
# Create an overarching top widget/layout supreme_top_box = QWidget() supreme_top_box_layout = QVBoxLayout() supreme_top_box.setLayout(supreme_top_box_layout) # Monitor section will have labels inside of a grid layout top_box = QWidget() grid = QGridLayout() top_box.setLayout(grid) supreme_top_box_layout.addWidget(top_box) # Creates lists of labels, initially having only static labels and having # the tangible labels substituted with '' static_labels = ['', '', '', '', '', '', '', '', '', 'Lead 1', 'Sub 1 ', 'Sub 2 ', 'Sub 3 ', 'Sub 4 ', 'Lead 2', 'Team Totals', 'Type:', '', '', '', '', '', '', '', 'HP:', 0, 0, 0, 0, 0, 0, 0, 'Atk:', 0, 0, 0, 0, 0, 0, 0, 'Pronged Atk:', 0, 0, 0, 0, 0, 0, 0, 'RCV:', 0, 0, 0, 0, 0, 0, 0, 'Awakenings:', '', '', '', '', '', '', ''] self.display_labels = [QLabel(gui) for x in range(len(static_labels))] for s_label, d_label in zip(static_labels, self.display_labels): if s_label == '': continue d_label.setText(str(s_label)) positions = [(i, j) for i in range(8) for j in range(8)] for position, d_label in zip(positions, self.display_labels): # why *position? because the array is [(i,j), (i,j),...,(i,j)] grid.addWidget(d_label, *position) grid.setAlignment(d_label, Qt.AlignHCenter) self.leader_skills_labels = [QLabel(gui) for x in range(2)] for x in range(2): self.leader_skills_labels[x].setText('Leader Skill '+str(x+1)+': ') supreme_top_box_layout.addWidget(self.leader_skills_labels[x]) # Create another row of labels for Awoken Skills Image Lists # Create another row of labels to show the Leader Skill Multipliers ######################################################################## # add things to bottom of the screen here (Input section)! 
# Input section will be split in two: have LineEdits in a grid layout and then PushButtons in a separate grid # layout bottom_box = QWidget() grid2 = QGridLayout() bottom_box.setLayout(grid2) bottom_labels_text = ['Leader 1', 'Sub 1', 'Sub 2', 'Sub 3', 'Sub 4', 'Leader 2'] bottom_labels = [QLabel(gui) for x in range(6)] instruction_labels_text = ['Please enter the name here:', 'Enter level here:', 'Enter pluses here:'] instruction_labels = [QLabel(gui) for x in range(3)] self.line_edits = [QLineEdit(gui) for x in range(6)] line_edit_completer = QCompleter() line_edit_completer.setCaseSensitivity(Qt.CaseInsensitive) line_edit_completer.setFilterMode(Qt.MatchContains) line_edit_completer.setModel(self.completer_string_list_model) # Combo Boxes for Levels and Pluses level_boxes = [QComboBox(gui) for x in range(6)] self.plus_boxes_types = [QComboBox(gui) for x in range(6)] self.plus_boxes_values = [QComboBox(gui) for x in range(6)] for x in range(6): for n in range(0,100): if n != 0 and n <= self.team[x].max_level: level_boxes[x].addItem(str(n)) self.plus_boxes_values[x].addItem(str(n)) self.plus_boxes_types[x].addItem('hp') self.plus_boxes_types[x].addItem('atk') self.plus_boxes_types[x].addItem('rcv') self.plus_boxes_values[x].hide() # add the labels and line_edits to the bottom grid for x in range(6): bottom_labels[x].setText(bottom_labels_text[x]) bottom_labels[x].adjustSize() grid2.addWidget(bottom_labels[x], *(x+1, 0)) grid2.addWidget(self.line_edits[x], *(x+1, 1)) grid2.addWidget(level_boxes[x], *(x+1, 2)) grid2.addWidget(self.plus_boxes_types[x], *(x+1, 3)) grid2.addWidget(self.plus_boxes_values[x], *(x+1, 3)) self.line_edits[x].textChanged[str].connect(partial(self._on_changed_, x)) self.line_edits[x].setCompleter(line_edit_completer) self.line_edits[x].setMaxLength(50) level_boxes[x].activated[str].connect(partial(self._on_level_activated_, x)) self.plus_boxes_types[x].activated[str].connect(partial(self._on_plus_type_activated_, x)) for x in range(3): 
# --- continuation of CalculatorScreen.__init__ (loop header appears earlier in the file) ---
# place each instruction label in row 0 of grid2, one column per label
instruction_labels[x].setText(instruction_labels_text[x])
instruction_labels[x].adjustSize()
grid2.addWidget(instruction_labels[x], *(0, x+1))

###########################################################################
# create the button widgets in a separate widget below bottom_box
below_bottom_box = QWidget()
grid3 = QGridLayout()
below_bottom_box.setLayout(grid3)
# create a set of buttons below the line_edits:
# White(Base) Red Blue Green Yellow Purple
buttons = []
button_labels = ['Fire', 'Water', 'Wood', 'Light', 'Dark', 'Base']
button_colors = ['red', 'lightskyblue', 'green', 'goldenrod', 'mediumpurple', 'white']
for x in range(6):
    buttons.append(QPushButton(button_labels[x], gui))
    # each button re-displays the attack columns for its element (5 = base/sum)
    buttons[x].clicked.connect(partial(self._handle_button_, x))
    buttons[x].setStyleSheet('QPushButton { background-color : %s }' % button_colors[x])
    grid3.addWidget(buttons[x], *(0, x))
# create a QHBoxLayout widget that holds the page turners and toggle
page_turner = QWidget()
page_turner_layout = QHBoxLayout()
page_turner.setLayout(page_turner_layout)
# create the page turner and toggle widgets
page_turner_layout.addStretch()
self.toggle_button = QPushButton('Toggle On Modified Stats', gui)
self.toggle_button.setCheckable(True)
self.toggle_button.clicked[bool].connect(self._handle_toggle_button_)
page_turner_layout.addWidget(self.toggle_button)
page_turner_layout.addStretch()
# Create overarching bottom widget
supreme_bottom_box = QWidget()
supreme_bottom_box_layout = QVBoxLayout()
supreme_bottom_box.setLayout(supreme_bottom_box_layout)
button_label = QLabel('Select from below the attribute you would like to display.')
supreme_bottom_box_layout.setAlignment(button_label, Qt.AlignHCenter)
supreme_bottom_box_layout.addWidget(bottom_box)
supreme_bottom_box_layout.addWidget(button_label)
supreme_bottom_box_layout.addWidget(below_bottom_box)
supreme_bottom_box_layout.addWidget(page_turner)
# Add the two screens into a split screen
splitter = QSplitter(Qt.Vertical)
splitter.addWidget(supreme_top_box)
splitter.addWidget(supreme_bottom_box)
# Add the split screen to our main screen
self.addWidget(splitter)

def _create_monster_(self, index, dict_index, name):
    """
    When a valid name has been entered into the line edits, create a PADMonster Class using
    the values stored in the json dictionary and save the PADMonster to the appropriate index
    in the team array and PADTeam Class subsequently.
    :param index: 0 = lead 1, 1 = sub 1, 2 = sub 2, 3 = sub 3, 4 = sub 4, 5 = lead 2
    :param dict_index: the index in the json dictionary containing the monster
    :param name: the monster's name
    """
    self.team[index] = PADMonster()
    self.team_base[index] = PADMonster()
    # pull every field for this monster out of the loaded json dictionary
    hp_max = self.json_monsters[dict_index]["hp_max"]
    atk_max = self.json_monsters[dict_index]["atk_max"]
    rcv_max = self.json_monsters[dict_index]["rcv_max"]
    attr1 = self.json_monsters[dict_index]["element"]
    attr2 = self.json_monsters[dict_index]["element2"]
    type1 = self.json_monsters[dict_index]["type"]
    type2 = self.json_monsters[dict_index]["type2"]
    image60_size = self.json_monsters[dict_index]["image60_size"]
    image60_href = self.json_monsters[dict_index]["image60_href"]
    awakenings = self.json_monsters[dict_index]["awoken_skills"]
    leader_skill_name = self.json_monsters[dict_index]["leader_skill"]
    max_level = self.json_monsters[dict_index]["max_level"]
    hp_min = self.json_monsters[dict_index]["hp_min"]
    atk_min = self.json_monsters[dict_index]["atk_min"]
    rcv_min = self.json_monsters[dict_index]["rcv_min"]
    hp_scale = self.json_monsters[dict_index]["hp_scale"]
    atk_scale = self.json_monsters[dict_index]["atk_scale"]
    rcv_scale = self.json_monsters[dict_index]["rcv_scale"]
    # use PAD_Monster's function to set our monster's stats
    self.team[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1, type2,
                                    image60_size, image60_href, awakenings, leader_skill_name,
                                    max_level, hp_min, hp_scale, atk_min, atk_scale, rcv_min,
                                    rcv_scale)
    # create a PADTeam Class according to our team of Six PADMonster Classes
    self.pad_team = PADTeam(self.team)
    # set our labels according to our monsters
    self._set_labels_(self.team[index], index)
    # save our team for future modifications:
    # team_base keeps an untouched copy of the monster's base stats
    self.team_base[index].set_base_stats(name, hp_max, atk_max, rcv_max, attr1, attr2, type1,
                                         type2, image60_size, image60_href, awakenings,
                                         leader_skill_name, max_level, hp_min, hp_scale,
                                         atk_min, atk_scale, rcv_min, rcv_scale)

def _set_labels_(self, monster, index):
    """
    Set the labels according to the values in the indexed PADMonster Class.

    The display_labels list is laid out in banks of 8 (6 monsters + spares/total):
    index+1 = portrait, +17 = type, +25 = hp, +33 = atk, +41 = pronged atk,
    +49 = rcv, +57 = awakenings tooltip.
    :param monster: the PADMonster associated with the index
    :param index: the index associated with the PADMonster [0-5]
    """
    # extract and display image
    self.display_labels[index + 1].setPixmap(QPixmap(os.path.join('images') + '/' + monster.name + '.png'))
    # display name
    font = QFont()
    font.setPointSize(5)
    type_text = monster.type_main_name+'/'+monster.type_sub_name
    self.display_labels[index + 17].setText(type_text)
    self.display_labels[index + 17].setFont(font)
    self.display_labels[index + 17].adjustSize()
    self.display_labels[index + 17].setToolTip(type_text)
    # display hp
    hp = monster.hp
    # if modified by leader skills button has been pressed, multiply monster's stat by its
    # respective index in the stats modified variable of the PADTeam Class
    if self.is_pressed:
        hp *= self.pad_team.stats_modified_by[index][0]
    # if plus values have been set, display how many
    if monster.hp_plus > 0:
        self.display_labels[index + 25].setText(str(round(hp)) + ' (+' + str(monster.hp_plus) + ')')
    else:
        self.display_labels[index + 25].setText(str(round(hp)))
    self.display_labels[index + 25].adjustSize()
    # display attack and pronged attack of main element (color_num 5 = base/black)
    self._set_attack_labels_(index, 5, monster.atk[monster.attr_main],
                             monster.pronged_atk[monster.attr_main], monster.base_atk_plus)
    # display rcv
    rcv = monster.rcv
    # if modified by leader skills button has been pressed, multiply monster's stat by its
    # respective index in the stats modified variable of the PADTeam Class
    if self.is_pressed:
        rcv *= self.pad_team.stats_modified_by[index][2]
    # if plus values have been set, display how many
    if monster.rcv_plus > 0:
        self.display_labels[index + 49].setText(str(round(rcv)) + ' (+' + str(monster.rcv_plus) + ')')
    else:
        self.display_labels[index + 49].setText(str(round(rcv)))
    self.display_labels[index + 49].adjustSize()
    # display awakenings
    awakenings_text = ''
    awakenings_font = QFont()
    awakenings_font.setPointSize(6)
    for x in range(len(monster.awakenings)):
        if monster.awakenings[x][2] > 0:
            awakenings_text += monster.awakenings[x][0]+': '+str(monster.awakenings[x][2])+'\n'
    # set awakenings string to a tooltip since it can't fit into the grid
    self.display_labels[index + 57].setText('Hover Me!')
    self.display_labels[index + 57].setFont(awakenings_font)
    self.display_labels[index + 57].adjustSize()
    self.display_labels[index + 57].setToolTip(awakenings_text)
    # calculate and change our display labels for team total values with each change in monster
    self._set_team_labels_()
    # if the monster is in the first or last index, it's considered the leader and its leader
    # skill name and effect are displayed accordingly.
    # NOTE(review): both branches display 'Leader Skill 1:' -- the index == 5 branch probably
    # meant 'Leader Skill 2:'; confirm before changing the user-facing text.
    if index == 0:
        text = 'Leader Skill 1: '+self.team[0].leader_skill_name+' > '+self.team[0].leader_skill_desc
        # if the string is too long, splice it up
        if len(text) > 50:
            divider = len(text)//2
            # separate the string at a part that is a whitespace
            while text[divider] != ' ':
                divider += 1
            final_text = text[:divider]+'\n'+text[divider:]
        else:
            final_text = text
        self.leader_skills_labels[0].setText(final_text)
    elif index == 5:
        text = 'Leader Skill 1: '+self.team[5].leader_skill_name+' > '+self.team[5].leader_skill_desc
        # if the string is too long, splice it up
        if len(text) > 50:
            divider = len(text)//2
            # separate the string at a part that is a whitespace
            while text[divider] != ' ':
                divider += 1
            final_text = text[:divider]+'\n'+text[divider:]
        else:
            final_text = text
        self.leader_skills_labels[1].setText(final_text)

def _set_attack_labels_(self, index, color_num, atk_value, pronged_atk_value, plus_value = 0):
    """
    Set the attack labels according to the values given.
    :param index: the index of the PADMonster [0-5] and 6 = the team total
    :param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
    :param atk_value: the value to be displayed in the attack label
    :param pronged_atk_value: the value to be displayed in the pronged attack label
    :param plus_value: the amount of pluses is set to 0 initially
    """
    # an array holding the colors associated with each value of color_num
    colors = ['red', 'blue', 'green', 'goldenrod', 'purple', 'black']
    # if modified by leader skills button has been pressed, multiply monster's stat by its
    # respective index in the stats modified variable of the PADTeam Class
    # (index 6 is the team total, which is already modified upstream)
    if self.is_pressed and index != 6:
        atk_value *= self.pad_team.stats_modified_by[index][1]
        pronged_atk_value *= self.pad_team.stats_modified_by[index][1]
    # display attack of main element
    if plus_value > 0:
        self.display_labels[index + 33].setText(str(round(atk_value)) + ' (+' + str(plus_value) + ')')
    else:
        self.display_labels[index + 33].setText(str(round(atk_value)))
    self.display_labels[index + 33].setStyleSheet("QLabel { color : %s }" % colors[color_num])
    self.display_labels[index + 33].adjustSize()
    # display pronged attack of main element
    self.display_labels[index + 41].setText(str(round(pronged_atk_value)))
    self.display_labels[index + 41].setStyleSheet("QLabel {color : %s }" % colors[color_num])
    self.display_labels[index + 41].adjustSize()

def _set_team_labels_(self):
    """
    Access the PADTeam Class to extract the values to be displayed in the Team Totals Labels
    """
    # initialize objects to store the total values
    hp_total = self.pad_team.hp
    atk_total = self.pad_team.base_atk
    pronged_atk_total = self.pad_team.base_pronged_atk
    rcv_total = self.pad_team.rcv
    total_awakenings = self.pad_team.awakenings
    # if the modified by leader skills button is pressed, use the team's modified stats instead
    if self.is_pressed:
        hp_total = self.pad_team.hp_modified
        atk_total = self.pad_team.base_atk_modified
        pronged_atk_total = self.pad_team.base_pronged_atk_modified
        rcv_total = self.pad_team.rcv_modified
    # display our total value objects on our labels (31/55/63 are the "total" column slots)
    self.display_labels[31].setText(str(round(hp_total)))
    self.display_labels[31].adjustSize()
    self._set_attack_labels_(6, 5, atk_total, pronged_atk_total)
    self.display_labels[55].setText(str(round(rcv_total)))
    self.display_labels[55].adjustSize()
    # set the label containing the team's total awakenings to a tooltip since it won't fit
    awakenings_font = QFont()
    awakenings_font.setPointSize(6)
    self.display_labels[63].setText('Hover Me!')
    self.display_labels[63].setFont(awakenings_font)
    self.display_labels[63].adjustSize()
    self.display_labels[63].setToolTip(total_awakenings)

def _get_total_attr_attack_(self, attr):
    """
    Returns the values stored in PADTeam for the Team's Total Attacks and Pronged Attacks
    for the specified element or the sum of all the element's attacks (BASE)
    :param attr: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
    :return: (atk_total, pronged_atk_total) tuple for the requested element
    """
    # if we're not looking for the base values a.k.a. sum of all the values
    if attr != 5:
        if not self.is_pressed:
            atk_total = self.pad_team.atk[attr]
            pronged_atk_total = self.pad_team.pronged_atk[attr]
        else:
            atk_total = self.pad_team.atk_modified[attr]
            pronged_atk_total = self.pad_team.pronged_atk_modified[attr]
    # if we're looking for the base values
    else:
        if not self.is_pressed:
            atk_total = self.pad_team.base_atk
            pronged_atk_total = self.pad_team.base_pronged_atk
        else:
            atk_total = self.pad_team.base_atk_modified
            pronged_atk_total = self.pad_team.base_pronged_atk_modified
    return atk_total, pronged_atk_total

# when line_edits are altered, activate this line code according to the text in the line
def _on_changed_(self, index, text):
    """
    When a line edit is altered, check the text entered to see if it matches with any of the
    names in the json dictionary and create a PADMonster at the appropriate index in the team
    array if the name is found.
    :param index: the index of the line edit corresponding to the index of the PADMonster in
    the team array.
    :param text: the text currently inside the line edit
    """
    for x in range(len(self.json_monsters)):
        if text == self.json_monsters[x]["name"]:
            self._create_monster_(index, x, text)
        # also accept a Title Cased variant of the typed text
        elif text.title() == self.json_monsters[x]["name"]:
            self._create_monster_(index, x, text.title())

def _handle_button_(self, color_num, pressed):
    """
    Only show the Attack and Pronged Attack values of the appropriate element or sum of the
    elements if BASE is chosen.
    :param color_num: 0 = fire, 1 = water, 2 = wood, 3 = light, 4 = dark, 5 = base
    :param pressed: useless event input
    """
    for index in range(6):
        if color_num == 5:
            # BASE: show each monster's main-element attack
            self._set_attack_labels_(index, color_num, self.team[index].atk[self.team[index].attr_main],
                                     self.team[index].pronged_atk[self.team[index].attr_main])
        else:
            self._set_attack_labels_(index, color_num, self.team[index].atk[color_num],
                                     self.team[index].pronged_atk[color_num])
    atk_total, pronged_atk_total = self._get_total_attr_attack_(color_num)
    self._set_attack_labels_(6, color_num, atk_total, pronged_atk_total)

def _handle_toggle_button_(self, pressed):
    """
    If the modify stats by leader skills button is pressed, modify the button's text, set the
    Class Variable is_pressed to True/False accordingly, and reset the labels now that
    is_pressed has been changed.
    :param pressed: Useless event input.
    """
    if pressed:
        self.is_pressed = True
        self.toggle_button.setText('Toggle Off Modified Stats')
    else:
        self.is_pressed = False
        self.toggle_button.setText('Toggle On Modified Stats')
    # redraw every monster's labels under the new is_pressed state
    for monster in range(6):
        self._set_labels_(self.team[monster], monster)

def _on_level_activated_(self, index, level):
    """
    If a level for the PADMonster has been selected, change the monster's base stats according
    to that level, reset pad_team according to these new values and reset labels accordingly.
    :param index: PADMonster's index in the team array. [0-5]
    :param level: the level the PADMonster will be set to
    """
    self.team[index]._set_stats_at_level_(int(level))
    self.team_base[index]._set_stats_at_level_(int(level))
    self.pad_team = PADTeam(self.team)
    for monster in range(6):
        self._set_labels_(self.team[monster], monster)

def _on_plus_type_activated_(self, index, text):
    """
    If hp, atk, or rcv has been selected in the drop down menu, hide the menu asking for the
    type and show the menu asking for the value of pluses between 0-99.
    :param index: PADMonster's index in the team array. [0-5]
    :param text: 'hp', 'atk', or 'rcv'
    """
    self.plus_boxes_types[index].hide()
    self.plus_boxes_values[index].show()
    # drop any previous handler so the value box isn't connected more than once
    try:
        self.plus_boxes_values[index].activated[str].disconnect()
    except Exception:
        pass
    self.plus_boxes_values[index].activated[str].connect(partial(self._on_plus_value_activated_, index, text))
    self.plus_boxes_types[index].disconnect()

def _on_plus_value_activated_(self, index, type, value):
    """
    If the value pertaining to the specified type has been selected, modify the appropriate
    stat of the indexed PADMonster according the specified amount of pluses, reset the
    pad_team according to the modified stats, and redisplay the new values
    :param index: PADMonster's index in the team array. [0-5]
    :param type: 'hp', 'atk', or 'rcv'
    :param value: the value, 0-99, of pluses the PADMonster has for the specified type
    """
    # swap the value box back out for the type box
    self.plus_boxes_types[index].show()
    self.plus_boxes_types[index].activated[str].connect(partial(self._on_plus_type_activated_, index))
    self.plus_boxes_values[index].hide()
    self.team[index]._set_stats_with_pluses_(type, int(value))
    self.team_base[index]._set_stats_with_pluses_(type, int(value))
    self.pad_team = PADTeam(self.team)
    for monster in range(6):
        self._set_labels_(self.team[monster], monster)

# class mouselistener(QLabel):
#     def __init__(self):
#         super().__init__()
#
#         self.setMouseTracking(True)
#         self.widget_location = self.rect()
#
#     def mouseMoveEvent(self, event):
#         posMouse = event.pos()
#         font = QFont()
#         if self.widget_location.contains(posMouse):
#             font.setPointSize(8)
#
#             QToolTip.setFont(font)
#             self.setToolTip(self.text())
#
#         return super().mouseReleaseEvent(event)
/PADCompleter.py
__author__ = 'Aaron'
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtWidgets, QtCore, QtGui


class PADCompleter(QCompleter):
    """A QCompleter that matches the typed text anywhere inside a candidate string
    (substring search) instead of only as a prefix, by installing a filtering proxy
    model every time the text changes."""

    def __init__(self):
        super().__init__()
        # the text currently typed by the user
        self.prefix = ''
        # the unfiltered source model
        # NOTE(review): this attribute shadows QCompleter.model(); kept as-is so
        # existing callers that read `.model` keep working.
        self.model = None

    def _set_model_(self, model):
        """Remember the unfiltered source model and install it on the completer.

        :param model: the QAbstractItemModel containing all completion candidates
        """
        self.model = model
        super().setModel(self.model)

    def _update_model_(self):
        """Rebuild the proxy model so only rows containing the current prefix survive."""
        prefix = self.prefix

        class InnerProxyModel(QSortFilterProxyModel):
            def filterAcceptsRow(self, row, parent):
                index = self.sourceModel().index(row, 0, parent)
                search_string = prefix.lower()
                model_string = self.sourceModel().data(index, Qt.DisplayRole).lower()
                # accept the row if the typed text occurs anywhere in it
                return search_string in model_string

        proxy_model = InnerProxyModel()
        proxy_model.setSourceModel(self.model)
        self.setModel(proxy_model)

    def splitPath(self, path):
        """Qt calls this with the current text; we hijack it to refresh the filter.

        Must return a list of path components. Since filtering is done entirely by
        the proxy model, return an empty list so every remaining row is a match.
        :param path: the text currently in the attached line edit
        """
        self.prefix = str(path)
        self._update_model_()
        # fixed: previously returned self.sourceModel().data() -- QCompleter has no
        # sourceModel() and data() requires an index, so this raised on every
        # keystroke and never returned the list-of-strings Qt expects.
        return []
/PADScreen.py
__author__ = 'Aaron'
from Calculator_Screen import CalculatorScreen
from Board_Screen import BoardScreen
from PAD_Monster import PADMonster
from PAD_Team import PADTeam
from PyQt5.QtWidgets import (QVBoxLayout, QHBoxLayout, QWidget, QPushButton, QSplitter, QAction,
                             QFileDialog, QMainWindow, QStackedWidget, QSplitter)
from PyQt5.QtCore import Qt
import os
import json
from functools import partial


class PADScreen(QStackedWidget):
    """Top-level stacked widget: page 1 is the calculator screen, page 2 the board
    screen, with a shared page-turner bar and a File menu for load/save/new team."""

    def __init__(self, main_window):
        """
        Initialize the PADScreen Class
        :param main_window: the main window widget which will hold our menu bar
        """
        super().__init__()
        # create an open file and save file action for our menu bar and connect them to their
        # respective functions
        open_file = QAction('Load Team...', main_window)
        open_file.setShortcut('Ctrl+O')
        open_file.triggered.connect(partial(self._show_dialog_box_, 'Open', main_window))
        save_file = QAction('Save Team...', main_window)
        save_file.setShortcut('Ctrl+S')
        save_file.triggered.connect(partial(self._show_dialog_box_, 'Save', main_window))
        clear_team = QAction('New Team', main_window)
        clear_team.setShortcut('Ctrl+N')
        clear_team.triggered.connect(self.__clear__team__)
        # create our menu bar, attach it to our main window and add to it our actions
        menubar = main_window.menuBar()
        file_menu = menubar.addMenu('&File')
        file_menu.addAction(open_file)
        file_menu.addAction(save_file)
        file_menu.addAction(clear_team)
        # create the widget containing the first page of the GUI, the calculator page
        self.calculator_screen = QWidget(self)
        # use custom calculator layout for the widget's layout
        self.calculator_screen_layout = CalculatorScreen(self)
        self.calculator_screen.setLayout(self.calculator_screen_layout)
        # initialize a variable to hold the PADTeam
        self.pad_team = self.calculator_screen_layout.pad_team
        self.team = self.calculator_screen_layout.team
        # create the widget containing the second page of the GUI, the board page
        self.board_screen = QWidget(self)
        # use custom board layout for the widget's layout
        self.board_screen_layout = BoardScreen(self, self.team, self.pad_team)
        self.board_screen.setLayout(self.board_screen_layout)
        # create the bottom widget for the GUI which will contain the page turning buttons
        self.page_turner = QWidget(main_window)
        page_turner_layout = QHBoxLayout(main_window)
        self.page_turner.setLayout(page_turner_layout)
        self.turn_left = QPushButton('<', main_window)
        page_turner_layout.addWidget(self.turn_left)
        page_turner_layout.addStretch()
        page_turner_layout.addStretch()
        self.turn_right = QPushButton('>', main_window)
        page_turner_layout.addWidget(self.turn_right)
        # initially hide the button to turn left as the GUI initializes on page 1
        self.turn_left.hide()
        self.page_one_splitter = QSplitter(Qt.Vertical)
        self.page_one_splitter.addWidget(self.calculator_screen)
        self.page_one_splitter.addWidget(self.page_turner)
        self.addWidget(self.page_one_splitter)
        self.page_two_splitter = QSplitter(Qt.Vertical)
        self.page_two_splitter.addWidget(self.board_screen)
        self.addWidget(self.page_two_splitter)
        self._init_screen_()

    def _init_screen_(self):
        """
        Connect the page-turning buttons to their handlers.

        fixed: the buttons are now connected exactly once, here.  The old code
        re-connected them inside the page-flip handlers, so every flip stacked an
        additional duplicate connection and the handlers fired multiple times.
        """
        self.turn_right.clicked.connect(self._go_to_board_screen_)
        self.turn_left.clicked.connect(self._go_to_calculator_screen_)

    def _go_to_board_screen_(self, clicked):
        """
        Set the active screen to the second page when the right button is clicked, push the
        current team into the board screen, and swap which page-turner button is visible.
        :param clicked: the clicking event, unused.
        """
        # hand the board screen the latest team/team totals before showing it
        self.board_screen_layout.team = self.calculator_screen_layout.team
        self.board_screen_layout.team_totals = self.calculator_screen_layout.pad_team
        self.board_screen_layout.set__team(self.board_screen_layout.team)
        self.setCurrentWidget(self.page_two_splitter)
        # the page_turner widget can only live in one splitter at a time; reparent it here
        self.page_two_splitter.addWidget(self.page_turner)
        self.turn_right.hide()
        self.turn_left.show()

    def _go_to_calculator_screen_(self, clicked):
        """
        Set the active screen back to the first page when the left button is clicked and swap
        which page-turner button is visible.
        :param clicked: the clicking event, unused.
        """
        self.turn_left.hide()
        self.turn_right.show()
        # reparent the shared page_turner bar back onto page one
        self.page_one_splitter.addWidget(self.page_turner)
        self.setCurrentWidget(self.page_one_splitter)

    def _show_dialog_box_(self, stringname, gui):
        """
        If the stringname is 'Open', open a dialog where the user can select a team to load
        into the line edits. If the stringname is 'Save', open a dialog where the user can save
        the team members into a json-formatted txt file.
        :param stringname: 'Open' or 'Save', the corresponding menu action will contain the
        key stringname.
        :param gui: parent widget for the dialog.
        """
        if stringname == 'Open':
            filename = QFileDialog.getOpenFileName(gui, 'Load Team...', os.path.join('saved teams'),
                                                   'Text files (*.txt)')
            # if not empty string and has the appropriate subscript
            if filename[0] and filename[0].endswith('txt'):
                with open(os.path.realpath(filename[0]), 'r') as file:
                    json_content = json.loads(file.read())
                    for monster in range(6):
                        name = json_content[monster]['name']
                        hp_plus = json_content[monster]['hp plus']
                        atk_plus = json_content[monster]['atk plus']
                        rcv_plus = json_content[monster]['rcv plus']
                        level = json_content[monster]['level']
                        # enter the names into the line edits and replay pluses/level
                        self.calculator_screen_layout.line_edits[monster].setText(name)
                        self.calculator_screen_layout._on_plus_value_activated_(monster, 'hp', hp_plus)
                        self.calculator_screen_layout._on_plus_value_activated_(monster, 'atk', atk_plus)
                        self.calculator_screen_layout._on_plus_value_activated_(monster, 'rcv', rcv_plus)
                        self.calculator_screen_layout._on_level_activated_(monster, level)
        if stringname == 'Save':
            # fixed: the filter string was missing its closing ')'
            filename = QFileDialog.getSaveFileName(gui, 'Save Team...', os.path.join('saved teams'),
                                                   'Text files (*.txt)')
            # if not empty string
            if filename[0]:
                # serialize each monster's name, pluses, and level
                json_file = [{} for monster in range(6)]
                for monster in range(6):
                    monster_name = self.calculator_screen_layout.team[monster].name
                    hp_plus = self.calculator_screen_layout.team[monster].hp_plus
                    atk_plus = self.calculator_screen_layout.team[monster].base_atk_plus
                    rcv_plus = self.calculator_screen_layout.team[monster].rcv_plus
                    current_level = self.calculator_screen_layout.team[monster].current_level
                    json_file[monster]['name'] = monster_name
                    json_file[monster]['hp plus'] = hp_plus
                    json_file[monster]['atk plus'] = atk_plus
                    json_file[monster]['rcv plus'] = rcv_plus
                    json_file[monster]['level'] = current_level
                # fixed: only append '.txt' when missing; the old code always appended
                # it, producing names like 'team.txt.txt'
                save_path = filename[0] if filename[0].endswith('.txt') else filename[0] + '.txt'
                with open(os.path.realpath(save_path), 'w') as file:
                    json.dump(json_file, file)

    def __clear__team__(self):
        """Reset the calculator page to an empty six-monster team."""
        for index in range(6):
            self.calculator_screen_layout.line_edits[index].clear()
        self.calculator_screen_layout.team = [PADMonster() for monster in range(6)]
        self.calculator_screen_layout.pad_team = PADTeam(self.calculator_screen_layout.team)
        for index in range(6):
            self.calculator_screen_layout._set_labels_(self.calculator_screen_layout.team[index], index)
/PAD_GUI.py
__author__ = 'Aaron'

# import necessary files
# fixed: removed `from PyQt5 import PyQt5` -- PyQt5 has no submodule named PyQt5,
# so that line raised ImportError and the application could never start.
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QHBoxLayout, QFrame, QSplitter,
                             QStyleFactory, QMainWindow, QStackedWidget)
from PyQt5.QtCore import Qt
from PADScreen import PADScreen


class GUIMainWindow(QMainWindow):
    """Top-level application window: hosts a PADScreen as the central widget."""

    def __init__(self):
        super().__init__()
        widget = PADScreen(self)
        self.setCentralWidget(widget)
        # set the window dimensions, title and show it off!
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('PAD Damage Calculator')
        self.show()


class PADGUI(QStackedWidget):
    """Legacy entry widget, not used by __main__.

    NOTE(review): `PADScreen(self, main_window)` does not match PADScreen's
    one-argument constructor and `setLayout` is handed a widget rather than a
    layout -- this class appears to be dead code kept for reference; confirm
    before reviving it.
    """

    def __init__(self, main_window):
        super().__init__()
        self.init_UI(main_window)

    def init_UI(self, main_window):
        # The initial screen that we'll be working on
        screen = PADScreen(self, main_window)
        screen_widget = QWidget(main_window)
        # Make the main screen our layout
        screen_widget.setLayout(screen)
        self.addWidget(screen_widget)
        # Add simulation screen here:
        # Set the window dimensions, title and show it off!
        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('PAD Damage Calculator')
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    gui = GUIMainWindow()
    sys.exit(app.exec_())
/PAD_Monster.py
__author__ = 'Aaron'

# Class Description:
# Our Monster Class where we hold all of the Monster's stats and calculate the values needed
# with those stats

import os
import json


class PADMonster:
    """A single PAD monster: base stats, awakenings, leader skill, and the derived
    per-element attack / two-pronged-attack values."""

    def __init__(self):
        # initialize the Class's stats
        # _max, _min, and _scale are used for when the monster's level is set to something
        # other than its max level; _bonus is used for when awakenings add value to the base
        # stat; _base is the stat before pluses are applied
        self.name = ''
        self.hp = 0
        self.hp_max = 0
        self.hp_min = 0
        self.hp_scale = 0
        self.hp_plus = 0
        self.hp_bonus = 0
        self.hp_base = 0
        self.rcv_base = 0
        self.rcv = 0
        self.rcv_max = 0
        self.rcv_min = 0
        self.rcv_scale = 0
        self.rcv_plus = 0
        self.rcv_bonus = 0
        self.base_base_atk = 0
        self.base_atk = 0
        self.base_atk_max = 0
        self.base_atk_min = 0
        self.base_atk_scale = 0
        self.base_atk_plus = 0
        self.base_atk_bonus = 0
        # Array of Attack: atk[attribute]
        self.atk = [0, 0, 0, 0, 0]
        # Array of Pronged Attack: [attribute][0 = Main, 1 = Sub]
        self.pronged_atk = [0, 0, 0, 0, 0]
        self.max_level = 99
        self.current_level = 99
        # 'fire' = 0, 'water' = 1, 'wood' = 2, 'light' = 3, 'dark' = 4
        self.attr_main = 0
        self.attr_sub = 0
        # check if main attribute = sub attribute
        self.is_same_attr = False
        # save list of attribute types
        self.attributes = ['fire', 'water', 'wood', 'light', 'dark']
        # see list of types for corresponding index number
        self.type_main = 0
        self.type_sub = 0
        self.type_main_name = ''
        self.type_sub_name = ''
        # save list of types
        self.types = ['Evo Material', 'Balanced', 'Physical', 'Healer', 'Dragon', 'God',
                      'Attacker', 'Devil', '', '', '', '', 'Awoken Skill Material',
                      'Protected', 'Enhance Material']
        # save leader skill multipliers; leader_skill[0 = hp, 1 = atk, 2 = rcv]
        self.leader_skill = [0, 0, 0]
        # store image 60x60 size and file location on padherder.com
        self.image60_size = 0
        self.image60_href = ''
        # save amount of each awoken skill
        # id: 1 -> Enhanced HP, 2 -> Enhanced Attack, 3 -> Enhanced Heal,
        # 4-8 -> Reduce Fire/Water/Wood/Light/Dark Damage, 9 -> Auto-Recover,
        # 10 -> Resistance-Bind, 11 -> Resistance-Dark, 12 -> Resistance-Jammers,
        # 13 -> Resistance-Poison, 14-18 -> Enhanced Fire/Water/Wood/Light/Dark Orbs,
        # 19 -> Extend Time, 20 -> Recover Bind, 21 -> Skill Boost,
        # 22-26 -> Enhanced Fire/Water/Wood/Light/Dark Att., 27 -> Two-Pronged Attack,
        # 28 -> Resistance-Skill Lock
        self.awakenings = [['', '', 0] for x in range(28)]
        self.awakenings_names = ['Enhanced HP', 'Enhanced Attack', 'Enhanced Heal',
                                 'Reduce Fire Damage', 'Reduce Water Damage',
                                 'Reduce Wood Damage', 'Reduce Light Damage',
                                 'Reduce Dark Damage', 'Auto-Recover', 'Resistance-Bind',
                                 'Resistance-Dark', 'Resistance-Jammers', 'Resistance-Poison',
                                 'Enhanced Fire Orbs', 'Enhanced Water Orbs',
                                 'Enahnced Wood Orbs', 'Enhanced Light Orbs',
                                 'Enhanced Dark Orbs', 'Extend Time', 'Recover Bind',
                                 'Skill Boost', 'Enhanced Fire Att.', 'Enhanced Water Att.',
                                 'Enhanced Wood Att.', 'Enhanced Light Att.',
                                 'Enhanced Dark Att.', 'Two-Pronged Attack',
                                 'Resistance-Skill Lock']
        # open awakenings.txt and load it into a python object using json
        # fixed: use context managers so the file handles are closed (the old code
        # leaked both handles by storing them on self.json_file and never closing)
        with open(os.path.join('awakenings.txt'), 'r') as json_file:
            self.json_awakenings = json.loads(json_file.read())
        # iterate through self.json_awakenings and extract the necessary information into
        # self.awakenings; awakenings[id-1] = [name, desc, count]
        for awakening in self.json_awakenings:
            self.awakenings[awakening['id'] - 1] = [awakening['name'], awakening['desc'], 0]
        # leader skill
        self.leader_skill_name = ''
        self.leader_skill_desc = ''
        # [xhp, xatk, xrcv, ['elem/type?', which elem/type?]]
        self.leader_skill_effect = [1, 1, 1]
        with open(os.path.join('leader skills.txt'), 'r') as json_file:
            self.json_leader_skills = json.loads(json_file.read())

    def set_base_stats(self, name, hp, atk, rcv, attr1, attr2, type1, type2, size, href,
                       awakenings, leader_skill, level, hp_min, hp_scale, atk_min, atk_scale,
                       rcv_min, rcv_scale):
        """
        Load every base stat from the json monster record, apply awakening stat bonuses,
        look up the leader skill, and derive the per-element attack values.
        :param awakenings: list of awakening ids (1-based) this monster owns
        :param leader_skill: leader skill name used as the lookup key
        """
        self.name = name
        self.hp = hp
        self.hp_base = hp
        self.hp_max = hp
        self.hp_min = hp_min
        self.hp_scale = hp_scale
        self.base_atk = atk
        self.base_base_atk = atk
        self.base_atk_max = atk
        self.base_atk_min = atk_min
        self.base_atk_scale = atk_scale
        self.rcv = rcv
        self.rcv_base = rcv
        self.rcv_max = rcv
        self.rcv_min = rcv_min
        self.rcv_scale = rcv_scale
        self.max_level = level
        self.current_level = level
        self.attr_main = attr1
        self.attr_sub = attr2
        self.type_main = type1
        self.type_main_name = self.types[type1]
        self.type_sub = type2
        # NOTE(review): a sub type of 0 ('Evo Material') is treated as "no sub type"
        # because 0 is falsy -- confirm this matches the json encoding.
        if type2:
            self.type_sub_name = self.types[type2]
        self.image60_size = size
        self.image60_href = href
        self.leader_skill_name = leader_skill
        # count each owned awakening (ids are 1-based)
        for awakening in awakenings:
            self.awakenings[awakening - 1][2] += 1
        # sets _bonus stats if awakenings[0-2][2] a.k.a. the stat bonus awakenings are > 0:
        # +200 hp, +100 atk, +50 rcv per copy
        for x in range(3):
            if self.awakenings[x][2] > 0:
                if x == 0:
                    self.hp_bonus = self.awakenings[x][2] * 200
                    self.hp += self.hp_bonus
                    self.hp_base = self.hp
                if x == 1:
                    self.base_atk_bonus = self.awakenings[x][2] * 100
                    self.base_atk += self.base_atk_bonus
                    self.base_base_atk = self.base_atk
                if x == 2:
                    self.rcv_bonus = self.awakenings[x][2] * 50
                    self.rcv += self.rcv_bonus
                    self.rcv_base = self.rcv
        # find the leader skills' effects and description in the json library by name
        for x in range(len(self.json_leader_skills)):
            if leader_skill == self.json_leader_skills[x]['name']:
                self.leader_skill_desc = self.json_leader_skills[x]['effect']
                if 'data' in self.json_leader_skills[x].keys():
                    self.leader_skill_effect = self.json_leader_skills[x]['data']
        self._set_atk_(self.attr_main, self.attr_sub)
        self._set_pronged_atk_(self.attr_main, self.attr_sub)

    def _set_attr_main_(self, attr):
        """
        If the attribute name is valid, set the Class's attr_main value to the value
        corresponding to the attr
        :param attr: attribute name
        """
        if attr.lower() in self.attributes:
            self.attr_main = self.attributes.index(attr.lower())
            # if attribute is changed, check if main and sub attributes are the same
            self.is_same_attr = self.attr_main == self.attr_sub

    def _set_attr_sub_(self, attr):
        """
        If the attribute name is valid, set the Class's attr_sub value to the value
        corresponding to the attr
        :param attr: attribute name
        """
        if attr.lower() in self.attributes:
            self.attr_sub = self.attributes.index(attr.lower())
            # if attribute is changed, check if main and sub attributes are the same
            self.is_same_attr = self.attr_main == self.attr_sub

    def _set_atk_(self, attr1, attr2):
        """
        Calculate and set atk for each attribute: the main element gets full attack
        (x1.1 when both elements match), the sub element gets 1/3.
        :param attr1: value corresponding to main attribute
        :param attr2: value corresponding to sub attribute
        """
        if attr1 in [0, 1, 2, 3, 4]:
            if attr1 != attr2:
                self.atk[attr1] = self.base_atk
            else:
                self.atk[attr1] = self.base_atk * 1.1
        if attr2 in [0, 1, 2, 3, 4]:
            if attr1 != attr2:
                self.atk[attr2] = self.base_atk * (1/3)

    def _set_pronged_atk_(self, attr1, attr2):
        """
        Calculate and set pronged atk for each attribute: x1.5 per Two-Pronged Attack
        awakening (awakenings[26]).
        :param attr1: value corresponding to main attribute
        :param attr2: value corresponding to sub attribute
        """
        if attr1 in [0, 1, 2, 3, 4]:
            self.pronged_atk[attr1] = self.atk[attr1] * 1.5 ** self.awakenings[26][2]
        if attr2 in [0, 1, 2, 3, 4] and attr1 != attr2:
            self.pronged_atk[attr2] = self.atk[attr2] * 1.5 ** self.awakenings[26][2]

    def _set_stats_at_level_(self, level):
        """
        Modify all stats according to level, then re-apply awakening bonuses and pluses.
        :param level: Level the monster will be set to.
        """
        self.current_level = level
        self.hp = self._use_growth_formula(self.hp_min, self.hp_max, self.hp_scale)
        self.hp += self.hp_bonus
        self.hp_base = self.hp
        self._set_stats_with_pluses_('hp', self.hp_plus)
        self.base_atk = self._use_growth_formula(self.base_atk_min, self.base_atk_max,
                                                 self.base_atk_scale)
        self.base_atk += self.base_atk_bonus
        self.base_base_atk = self.base_atk
        self._set_stats_with_pluses_('atk', self.base_atk_plus)
        self.rcv = self._use_growth_formula(self.rcv_min, self.rcv_max, self.rcv_scale)
        self.rcv += self.rcv_bonus
        self.rcv_base = self.rcv
        self._set_stats_with_pluses_('rcv', self.rcv_plus)

    def _use_growth_formula(self, min_value, max_value, scale):
        """
        Applies the growth formula to get the values of the specified stat at the current
        level.
        :param min_value: the minimum value of the stat
        :param max_value: the maximum value of the stat
        :param scale: the scaling rate of the stat
        :return: the value of the stat at the current level
        """
        # fixed: monsters whose max_level is 1 divided by zero here; a level-1-only
        # monster simply has its max (== min) stat value.
        if self.max_level <= 1:
            return max_value
        value = ((self.current_level - 1) / (self.max_level - 1)) ** scale
        value *= (max_value - min_value)
        value += min_value
        return value

    def _set_stats_with_pluses_(self, type, num):
        """
        Modify the specified stat according to the specified amount of pluses
        (+10 hp, +5 atk, +3 rcv per plus).
        :param type: 'hp', 'atk', or 'rcv'
        :param num: 0-99, the number of pluses for the specified stat
        """
        if type == 'hp':
            self.hp_plus = num
            self.hp = self.hp_base + self.hp_plus * 10
        elif type == 'atk':
            self.base_atk_plus = num
            self.base_atk = self.base_base_atk + self.base_atk_plus * 5
            # attack pluses change every derived per-element attack value
            self._set_atk_(self.attr_main, self.attr_sub)
            self._set_pronged_atk_(self.attr_main, self.attr_sub)
        elif type == 'rcv':
            self.rcv_plus = num
            self.rcv = self.rcv_base + self.rcv_plus * 3
/PAD_Team.py
__author__ = 'Aaron'

import os

from PAD_Monster import PADMonster


class PADTeam:
    """Aggregate and leader-skill-modified stats for a six-monster team."""

    def __init__(self, team):
        """
        Initializes the PADTeam Class.

        :param team: an array containing 6 PADMonster instances
        """
        # e.g. self.team = [PADMonster() for monster in range(6)]
        self.team = team

        # Raw team totals.  Every atk array is indexed
        # [fire, water, wood, light, dark]; the "base" figures count only
        # each monster's main-attribute attack.
        self.hp = 0
        self.atk = [0, 0, 0, 0, 0]
        self.base_atk = 0
        self.pronged_atk = [0, 0, 0, 0, 0]
        self.base_pronged_atk = 0
        self.rcv = 0

        # Totals after both leader skills have been applied.
        self.hp_modified = 0
        self.atk_modified = [0, 0, 0, 0, 0]
        self.base_atk_modified = 0
        self.pronged_atk_modified = [0, 0, 0, 0, 0]
        self.base_pronged_atk_modified = 0
        self.rcv_modified = 0

        # Human-readable list of every awakening the team carries.
        self.awakenings = ''

        # Leader skill effects: [hp multiplier, atk multiplier, rcv multiplier].
        self.leader1_effects = [1, 1, 1]
        self.leader2_effects = [1, 1, 1]

        # Per-monster [hp, atk, rcv] multipliers, earned when the monster
        # satisfies the leader skills' conditions.
        self.stats_modified_by = [[1, 1, 1] for _ in range(6)]

        # Fill in every derived stat from the team given above.
        self.__set__team__hp()
        self.__set__team__rcv()
        self.__set__team__atk()
        self.__set__team__base__atk()
        self.__set__team__awakenings()
        self.__set__modified__stats__()

    def __set__team__hp(self):
        # Team HP is the plain sum over the six slots.
        self.hp = sum(self.team[slot].hp for slot in range(6))

    def __set__team__rcv(self):
        # Team RCV is the plain sum over the six slots.
        self.rcv = sum(self.team[slot].rcv for slot in range(6))

    def __set__team__awakenings(self):
        # Build one "<awakening name>: <count>\n" line per awakening that
        # at least one member carries; awakening entries look like
        # (name, ..., count) judging by the indexes used below.
        pieces = []
        for idx in range(len(self.team[0].awakenings)):
            # Sum the (positive) counts of this awakening across all slots.
            total = sum(self.team[slot].awakenings[idx][2]
                        for slot in range(6)
                        if self.team[slot].awakenings[idx][2] > 0)
            if total > 0:
                pieces.append(self.team[0].awakenings[idx][0] + ': ' + str(total) + '\n')
        self.awakenings = ''.join(pieces)

    def __set__team__atk(self):
        # Column-wise sums over the five elements.
        self.atk = [sum(member.atk[attr] for member in self.team)
                    for attr in range(5)]
        self.pronged_atk = [sum(member.pronged_atk[attr] for member in self.team)
                            for attr in range(5)]

    def __set__team__base__atk(self):
        # "Base" attack counts only each monster's main attribute.
        self.base_atk = sum(member.atk[member.attr_main] for member in self.team)
        self.base_pronged_atk = sum(member.pronged_atk[member.attr_main]
                                    for member in self.team)

    def __set__modified__stats__(self):
        # Start every slot back at neutral multipliers.
        self.stats_modified_by = [[1, 1, 1] for _ in range(6)]

        # Slots 0 and 5 are the leaders; apply each leader skill in turn.
        for leader_slot in (0, 5):
            leader = self.team[leader_slot]
            if not leader.leader_skill_name:
                continue  # an empty skill name means "no leader skill"
            effect = leader.leader_skill_effect
            # effect looks like [hp *, atk *, rcv *] plus an optional 4th
            # entry ['elem' | 'type', number] restricting who benefits.
            if len(effect) > 3:
                condition, num = effect[3][0], effect[3][1]
                for slot in range(6):
                    member = self.team[slot]
                    if condition == "elem":
                        qualifies = member.attr_main == num or member.attr_sub == num
                    elif condition == "type":
                        qualifies = member.type_main == num or member.type_sub == num
                    else:
                        qualifies = False
                    if qualifies:
                        for stat in range(3):
                            self.stats_modified_by[slot][stat] *= effect[stat]
            else:
                # Unconditional skill: every slot gets the multipliers.
                for slot in range(6):
                    for stat in range(3):
                        self.stats_modified_by[slot][stat] *= effect[stat]

        # Re-total the team stats with the per-slot multipliers applied.
        hp_total = 0
        rcv_total = 0
        base_atk_total = 0
        base_pronged_total = 0
        atk_total = [0, 0, 0, 0, 0]
        pronged_total = [0, 0, 0, 0, 0]
        for slot in range(6):
            member = self.team[slot]
            hp_mult, atk_mult, rcv_mult = self.stats_modified_by[slot]
            hp_total += member.hp * hp_mult
            rcv_total += member.rcv * rcv_mult
            main = member.attr_main
            base_atk_total += member.atk[main] * atk_mult
            base_pronged_total += member.pronged_atk[main] * atk_mult
            for attr in range(5):
                atk_total[attr] += member.atk[attr] * atk_mult
                pronged_total[attr] += member.pronged_atk[attr] * atk_mult
        self.hp_modified = hp_total
        self.atk_modified = atk_total
        self.base_atk_modified = base_atk_total
        self.pronged_atk_modified = pronged_total
        self.base_pronged_atk_modified = base_pronged_total
        self.rcv_modified = rcv_total
/image_updater.py
__author__ = 'Aaron'

# Class Description:
# Update our monsters.txt file and our images folder from urllib3
import urllib3  # fixed: "from urllib3 import urllib3" raises ImportError
import shutil
import os
import json


class image_updater():
    """Download the 60px portrait of each team member from padherder.

    Reads monster metadata from ./monsters.txt (a JSON array, despite the
    .txt extension) and writes any missing <name>.png files into ./images.
    """

    def __init__(self):
        # update monsters.txt here:
        # Close the metadata file once parsed (the original leaked the
        # handle); the file object stays on self for backward compatibility.
        self.json_file = open(os.path.realpath('./monsters.txt'), 'r')
        try:
            self.json_object = json.loads(self.json_file.read())
        finally:
            self.json_file.close()

        path = os.path.realpath('images')
        team = ['Sparkling Goddess of Secrets, Kali',
                'Holy Night Kirin Princess, Sakuya',
                'Soaring Dragon General, Sun Quan',
                'divine law goddess, valkyrie rose']

        # One shared pool for every download instead of a fresh
        # PoolManager per image.
        http = urllib3.PoolManager()

        for entry in self.json_object:
            name = entry["name"]
            if name not in team:
                continue
            # The target file is checked *before* downloading now, so an
            # already-saved image no longer costs a network round trip.
            destination = os.path.join(path, name + '.png')
            if os.path.exists(destination):
                print(name + '.png already exists.')
                continue
            url = 'https://padherder.com' + entry["image60_href"]
            request = http.request('GET', url)
            try:
                with open(destination, 'wb') as file:
                    file.write(request.data)
            finally:
                # Always return the connection to the pool, even on a
                # write failure (original only released on success).
                request.release_conn()


if __name__ == '__main__':
    updater = image_updater()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
guanjz20/MM21_FME_solution
refs/heads/master
{"/main_cls.py": ["/model/network.py", "/dataset/me_dataset.py"]}
└── ├── config.py ├── dataset │ ├── me_dataset.py │ ├── params.py │ └── utils.py ├── main_cls.py ├── model │ ├── network.py │ └── utils.py ├── paths.py ├── preprocess │ ├── CNN_feature_extraction.py │ ├── casme_2_label_generation.py │ ├── openface │ │ └── face_crop_align.py │ ├── params.py │ └── samm_2_label_generation.py ├── submit.py ├── trainer_cls.py └── utils.py
/config.py
"""Shared command-line argument parser for training / evaluation scripts.

Import ``parser`` from this module and call ``parser.parse_args()``
(see main_cls.py, which also builds a debug arg list from it).
"""
import argparse

parser = argparse.ArgumentParser(description="x")
parser.add_argument('--store_name', type=str, default="")
parser.add_argument('--save_root', type=str, default="")
parser.add_argument('--tag', type=str, default="")
parser.add_argument('--snap', type=str, default="")
parser.add_argument('--dataset', type=str, default="", choices=['SAMM', 'CASME_2'])
parser.add_argument('--data_aug', action='store_true')
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--amp', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--seed", default=111, type=int)
parser.add_argument('--finetune_list', default=[], type=str, nargs="+", help='finetune subjects')
parser.add_argument("--patience", default=15, type=int, help='front extend patience')

# ========================= Model Configs ==========================
parser.add_argument('--hidden_units', default=[2048, 256, 256], type=int, nargs="+", help='hidden units set up')
parser.add_argument('--length', type=int, default=64)
parser.add_argument('--step', type=int, default=64)
parser.add_argument('-L', type=int, default=12, help='the number of input difference images')
parser.add_argument('--input_size', type=int, default=112)
parser.add_argument('--data_option', type=str, choices=['diff', 'wt_diff', 'wt_dr'])

# ========================= Learning Configs ==========================
parser.add_argument('--epochs', default=25, type=int, metavar='N', help='number of total epochs to run')
# if validation loss didn't improve over 3 epochs, stop
parser.add_argument('--early_stop', type=int, default=3)
parser.add_argument('-b', '--batch_size', default=16, type=int, metavar='N', help='mini-batch size (default: 16)')
parser.add_argument('--lr', default=1e-2, type=float)
parser.add_argument('--lr_decay_factor', default=0.1, type=float)
parser.add_argument('--lr_steps', default=[2, 5], type=float, nargs="+", metavar='LRSteps',
                    help='epochs to decay learning rate by factor')
parser.add_argument('--optim', default='SGD', type=str)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float, metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--clip-gradient', '--gd', default=20, type=float, metavar='W', help='gradient norm clipping (default: 20)')
parser.add_argument('--focal_alpha', default=[1., 1.], type=float, nargs="+")

# ========================= Monitor Configs ==========================
parser.add_argument('--print-freq', '-p', default=50, type=int, metavar='N', help='print frequency (default: 50) iteration')
parser.add_argument('--eval-freq', '-ef', default=1, type=int, metavar='N', help='evaluation frequency (default: 1) epochs')

# ========================= Runtime Configs ==========================
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--delete_last', action='store_true', help='delete the last recorded subject')
parser.add_argument('-t', '--test', dest='test', action='store_true', help='evaluate model on test set')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('--gpus', type=str, default=None)
# Output sub-directory names, all created under --save_root.
parser.add_argument('--root_log', type=str, default='log')
parser.add_argument('--root_model', type=str, default='model')
parser.add_argument('--root_output', type=str, default='output')
parser.add_argument('--root_runs', type=str, default='runs')
parser.add_argument('--load_pretrained', type=str, default='')
parser.add_argument('--load_bn', action='store_true')
/dataset/me_dataset.py
"""Micro-expression datasets (SAMM / CASME_2) for sequence and image models.

Sequence datasets yield sliding windows of frames together with precomputed
CNN features, annotations and per-frame labels; image datasets yield a
single frame feature with a binary spotting label.
"""
from unicodedata import name
import cv2
import os
import pdb
import torch
import time
import pywt
import glob
import numpy as np
import os.path as osp
from tqdm import tqdm
from torch.utils.data import Dataset
from torch import nn as nn

from . import params
from . import utils

# Number of 2-D wavelet sub-bands produced by dwt2 (LL, LH, HL, HH).
WT_CHANNEL = 4

# Precomputed Gaussian kernels: a 2-D smoothing kernel plus two 1-D
# temporal-derivative kernels, loaded once at import time.
sm_kernel = np.load(params.GAUSS_KERNEL_PATH['sm_kernel'])
dr1_kernel = np.load(params.GAUSS_KERNEL_PATH['dr1_kernel'])
dr2_kernel = np.load(params.GAUSS_KERNEL_PATH['dr2_kernel'])
# Reshape the 1-D kernels to (k, 1, 1) so they broadcast over image stacks.
dr1_kernel = dr1_kernel[:, None, None]
dr2_kernel = dr2_kernel[:, None, None]


class SAMMDataset(Dataset):
    """Sliding-window sequence dataset over SAMM long-video frame folders."""

    def __init__(self,
                 mode,
                 img_dirs,
                 seq_len=64,
                 step=32,
                 time_len=12,
                 input_size=256,
                 data_aug=False,
                 data_option=None,
                 dataset_name='SAMM'):
        """
        :param mode: 'train' or 'test'; in test mode windows must not overlap.
        :param img_dirs: one frame directory or a list of them.
        :param seq_len: frames per returned window.
        :param step: stride between consecutive window starts.
        :param time_len: temporal context size (time_len//2 frames each side).
        :param input_size: target side length; doubled for wavelet options
            because dwt2 halves the spatial resolution.
        :param data_option: 'diff' | 'wt_diff' | 'wt_dr'.
        """
        super().__init__()
        self.dataset_name = dataset_name
        self.mode = mode
        self.seq_len = seq_len
        self.step = step
        assert mode == 'train' or (mode == 'test' and self.seq_len <= self.step)
        self.time_len = time_len  # observe time_len//2 frames before and after
        self.size = input_size if data_option == 'diff' else input_size * 2
        self.img_dirs = img_dirs  # imgs files dirs
        if not isinstance(self.img_dirs, list):
            self.img_dirs = [self.img_dirs]
        self.img_ps_dict = self._get_img_ps_dict()
        self.seq_list = self._get_seq_list()
        # Per-directory label/annotation arrays, indexed by frame position.
        # NOTE(review): subclasses override these right after super().__init__,
        # so the SAMM files are loaded even for CASME_2 — wasteful but harmless.
        self.label_dict = np.load(osp.join(params.SAMM_ROOT, 'label_dict.npy'),
                                  allow_pickle=True).item()
        self.anno_dict = np.load(osp.join(params.SAMM_ROOT, 'anno_dict.npy'),
                                 allow_pickle=True).item()
        # print('Load {} clips, {} frames from {}'.format(
        #     len(self.seq_list),
        #     len(self.seq_list) * self.seq_len, dataset_name))
        self.transform = utils.get_group_transform(
            mode) if data_aug else utils.Identity()
        self.data_option = data_option

    def _get_img_ps_dict(self):
        # Map each directory to an immutable, frame-number-sorted path tuple.
        ret_dict = {}
        for img_dir in self.img_dirs:
            img_ps = utils.scan_jpg_from_img_dir(img_dir)
            ret_dict[img_dir] = tuple(img_ps)
        return ret_dict

    def _get_seq_list(self):
        # Enumerate every [dir, front, tail) window that fits in each video.
        ret_list = []
        for img_dir, img_ps in self.img_ps_dict.items():
            front = 0
            tail = front + self.seq_len  # [front, tail), tail not include
            while tail <= len(img_ps):
                ret_list.append([img_dir, front, tail])  # (img dir, front_idx, tail_idx)
                front += self.step
                tail = front + self.seq_len
        return ret_list

    def __len__(self):
        return len(self.seq_list)

    def __getitem__(self, index):
        """Return (coefs, cnn_features, annos, labels, seq_info) for one window."""
        img_dir, front, tail = self.seq_list[
            index]  # [front, tail), tail not include
        seq_info = (img_dir, front, tail)
        # insert and append extra imgs for temporal conv; boundary frames
        # are clamped (repeated) at the start/end of the video
        _old_len = len(self.img_ps_dict[img_dir])
        img_ps = list(self.img_ps_dict[img_dir][front:tail])
        for i in range(1, self.time_len // 2 + 1):
            img_ps.insert(0, self.img_ps_dict[img_dir][max(0, front - i)])
            img_ps.append(self.img_ps_dict[img_dir][min(
                _old_len - 1, tail - 1 + i)])
        _cur_len = len(self.img_ps_dict[img_dir])
        assert _old_len == _cur_len  # make sure the dict has not been changed
        # read sequence features, annos and labels (features live in .npy
        # files next to each .jpg; only the core window, not the padding)
        img_features = np.stack([
            np.load(p.replace('.jpg', '.npy'))
            for p in img_ps[self.time_len // 2:-self.time_len // 2]
        ], 0)
        annos = self.anno_dict[img_dir][front:tail]
        labels = self.label_dict[img_dir][front:tail]
        assert img_features.shape == (self.seq_len, 2048)  # resnet50 features
        # read sequence imgs (core window plus temporal padding), grayscale,
        # center-cropped to square and resized to self.size
        flat_imgs = np.empty(
            (self.seq_len + (self.time_len // 2) * 2, self.size, self.size),
            dtype=np.float32)
        for i, p in enumerate(img_ps):
            img = cv2.imread(p, cv2.IMREAD_GRAYSCALE)
            if not img.shape[0] == img.shape[1]:
                # crop to square
                h, w = img.shape
                wide = abs(h - w) // 2
                if h > w:
                    img = img[wide:wide + w, :]
                else:
                    img = img[:, wide:wide + h]
            # NOTE(review): this only prints on failure and continues with a
            # non-square image, which would break the np.stack shapes later.
            try:
                assert img.shape[0] == img.shape[1]
            except:
                print('Error in cropping image {}'.format(p))
            img = cv2.resize(img, (self.size, self.size))
            flat_imgs[i] = img
        # transform
        flat_imgs = self.transform(flat_imgs)
        if self.data_option is not None and 'wt' in self.data_option:
            # One 4-band wavelet decomposition per frame.
            flat_wts = np.stack([dwt2(img) for img in flat_imgs], 0)
        # expand flat imgs: one (time_len + 1)-frame context per output frame
        i = 0
        front = 0
        tail = front + self.time_len  # [front, tail], tail include
        if self.data_option is not None and 'wt' in self.data_option:
            seq_wts = np.empty((self.seq_len, self.time_len + 1, WT_CHANNEL,
                                self.size // 2, self.size // 2),
                               dtype=np.float32)
        elif self.data_option == 'diff':
            seq_imgs = np.empty(
                (self.seq_len, self.time_len + 1, self.size, self.size),
                dtype=np.float32)
        while tail < len(flat_imgs):
            if self.data_option is not None and 'wt' in self.data_option:
                seq_wts[i] = flat_wts[front:tail + 1].copy()
            elif self.data_option == 'diff':
                seq_imgs[i] = flat_imgs[front:tail + 1].copy()
            i += 1
            front += 1
            tail += 1
        assert i == self.seq_len
        # data options
        if self.data_option == 'diff':
            # Consecutive-frame differences of raw images.
            ret_coefs = np.stack([get_diff(imgs) for imgs in seq_imgs], 0)
        elif self.data_option == 'wt_diff':
            # Consecutive-frame differences of wavelet coefficients.
            ret_coefs = np.stack([get_diff(coefs) for coefs in seq_wts],
                                 0).reshape(self.seq_len,
                                            self.time_len * WT_CHANNEL,
                                            self.size // 2, self.size // 2)
        elif self.data_option == 'wt_dr':
            # Smoothed 1st/2nd temporal derivatives of wavelet coefficients.
            ret_coefs = seq_wts.transpose(0, 2, 1, 3, 4)
            ret_coefs = np.asarray([[
                get_smoothing_and_dr_coefs(coefs_dim2)
                for coefs_dim2 in coefs_dim1
            ] for coefs_dim1 in ret_coefs])
            assert ret_coefs.shape[:3] == (self.seq_len, WT_CHANNEL, 3 * 2)
            ret_coefs = ret_coefs.transpose(0, 2, 1, 3, 4)
            ret_coefs = ret_coefs.reshape(self.seq_len, -1, self.size // 2,
                                          self.size // 2)
        elif self.data_option is None:
            print('Require data option...')
            exit()
        else:
            raise NotImplementedError
        ret_coefs = torch.FloatTensor(ret_coefs)
        img_features = torch.FloatTensor(img_features)
        annos = torch.FloatTensor(annos)
        labels = torch.LongTensor(labels)
        return ret_coefs, img_features, annos, labels, seq_info


class CASME_2Dataset(SAMMDataset):
    """Same pipeline as SAMMDataset but with CASME_2 labels/annotations."""

    def __init__(self,
                 mode,
                 img_dirs,
                 seq_len=64,
                 step=32,
                 time_len=12,
                 input_size=256,
                 data_aug=False,
                 data_option=None,
                 dataset_name='CASME_2'):
        super().__init__(mode,
                         img_dirs,
                         seq_len=seq_len,
                         step=step,
                         time_len=time_len,
                         input_size=input_size,
                         data_aug=data_aug,
                         data_option=data_option,
                         dataset_name=dataset_name)
        # Override the SAMM label/anno dicts loaded by the parent.
        self.label_dict = np.load(osp.join(params.CASME_2_LABEL_DIR,
                                           'label_dict.npy'),
                                  allow_pickle=True).item()
        self.anno_dict = np.load(osp.join(params.CASME_2_LABEL_DIR,
                                          'anno_dict.npy'),
                                 allow_pickle=True).item()


class SAMMImageDataset(Dataset):
    """Per-frame dataset: (cnn_feature, binary spotting label, path)."""

    def __init__(self, img_ps):
        super().__init__()
        self.img_ps = img_ps
        self.bi_label = np.load(
            osp.join(params.SAMM_ROOT, 'bi_label.npy'),
            allow_pickle=True).item()  # imgs_dir -> [<target img_p> ... ]

    def __len__(self):
        return len(self.img_ps)

    def __getitem__(self, index):
        img_p = self.img_ps[index]
        # CNN feature precomputed into a sibling .npy file.
        npy_p = img_p.replace('.jpg', '.npy')
        feature = np.load(npy_p)
        feature = torch.tensor(feature, dtype=torch.float32)
        imgs_dir = osp.dirname(img_p)
        label = 1 if img_p in self.bi_label[
            imgs_dir] else 0  # 1 for spotting region
        label = torch.tensor(label, dtype=torch.long)
        return feature, label, img_p


class CASME_2ImageDataset(SAMMImageDataset):
    """SAMMImageDataset with CASME_2 binary labels."""

    def __init__(self, img_ps):
        super().__init__(img_ps)
        # Override the SAMM binary labels loaded by the parent.
        self.bi_label = np.load(
            osp.join(params.CASME_2_LABEL_DIR, 'bi_label.npy'),
            allow_pickle=True).item()  # imgs_dir -> [<target img_p> ... ]


def get_diff(imgs):
    """Return consecutive differences along axis 0: imgs[1:] - imgs[:-1]."""
    if len(imgs.shape) == 3:
        assert imgs.shape[1] == imgs.shape[2]  # imgs (T, H, W)
    elif len(imgs.shape) == 4:
        assert imgs.shape[2] == imgs.shape[
            3] and imgs.shape[1] == WT_CHANNEL  # wt_coefs (T, 4, H, W)
    imgs1 = imgs[:-1]
    imgs2 = imgs[1:]
    return imgs2 - imgs1


def dwt2(img, wave_name='haar'):
    """Single-level 2-D DWT; stacks LL and the 3 detail bands."""
    assert isinstance(img, np.ndarray)
    coefs = pywt.dwt2(img, wave_name)
    coefs = np.array([coefs[0], *coefs[1]])
    return coefs  # (4, w//2, h//2)


def get_smoothing_and_dr_coefs(imgs):
    """Spatially smooth each frame, then take 1st and 2nd temporal
    derivatives with the precomputed Gaussian kernels (valid convolution
    along time, so the output is shorter by kernel_size - 1).
    """
    global sm_kernel, dr1_kernel, dr2_kernel
    sm_imgs = np.array([cv2.filter2D(img, -1, sm_kernel) for img in imgs])
    dr_ks = dr1_kernel.shape[0]
    dr1_res = []
    dr2_res = []
    for i in range(len(imgs) - dr_ks + 1):
        _imgs = sm_imgs[i:i + dr_ks]
        dr1_res.append((_imgs * dr1_kernel).sum(axis=0))
        dr2_res.append((_imgs * dr2_kernel).sum(axis=0))
    res = np.stack((*dr1_res, *dr2_res), 0)
    return res
/dataset/params.py
# Dataset locations and precomputed Gaussian kernel files used by
# dataset/me_dataset.py.
# NOTE(review): these are machine-specific absolute paths — adjust per host.

SAMM_ROOT = '/data/gjz_mm21/SAMM'
CASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels'

# kernel path: .npy files holding a 2-D smoothing kernel and two 1-D
# temporal derivative kernels (loaded once in me_dataset.py).
GAUSS_KERNEL_PATH = {
    'sm_kernel': '/home/gjz/lry_kernels/gauss2D-smooth.npy',
    'dr1_kernel': '/home/gjz/lry_kernels/gauss1D-derivative1.npy',
    'dr2_kernel': '/home/gjz/lry_kernels/gauss1D-derivative2.npy'
}
/dataset/utils.py
"""Image transform utilities (albumentations-based) for the ME datasets."""
from albumentations.augmentations.transforms import GaussNoise
import cv2
import os
import numpy as np
import os.path as osp
import albumentations as alb

# from torch._C import Ident
# from torch.nn.modules.linear import Identity


class IsotropicResize(alb.DualTransform):
    """Resize so the longest side equals ``max_side``, keeping aspect ratio."""

    def __init__(self,
                 max_side,
                 interpolation_down=cv2.INTER_AREA,
                 interpolation_up=cv2.INTER_CUBIC,
                 always_apply=False,
                 p=1):
        super(IsotropicResize, self).__init__(always_apply, p)
        self.max_side = max_side
        self.interpolation_down = interpolation_down
        self.interpolation_up = interpolation_up

    def apply(self,
              img,
              interpolation_down=cv2.INTER_AREA,
              interpolation_up=cv2.INTER_CUBIC,
              **params):
        return isotropically_resize_image(
            img,
            size=self.max_side,
            interpolation_down=interpolation_down,
            interpolation_up=interpolation_up)

    def apply_to_mask(self, img, **params):
        # Masks must use nearest-neighbour so label values stay exact.
        return self.apply(img,
                          interpolation_down=cv2.INTER_NEAREST,
                          interpolation_up=cv2.INTER_NEAREST,
                          **params)

    def get_transform_init_args_names(self):
        return ("max_side", "interpolation_down", "interpolation_up")


class Identity():
    """No-op transform used when data augmentation is disabled."""

    def __init__(self):
        pass

    def __call__(self, x):
        return x


class GroupTrainTransform():
    """Apply randomly-gated augmentations to a whole group of images.

    Fixes vs. the original:
    - every attribute assignment ended with a trailing comma, which made
      each attribute a 1-tuple instead of the transform itself, so every
      ``aug_method(image=img)`` call would raise TypeError;
    - ``_apply_aug`` was declared without ``self`` although it was called
      as a bound method (``self._apply_aug(...)``), shifting all arguments.
    """

    def __init__(self):
        self.ImageCompression = alb.ImageCompression(quality_lower=60,
                                                     quality_upper=100,
                                                     p=1)
        self.GaussNoise = alb.GaussNoise(p=1)
        self.GaussianBlur = alb.GaussianBlur(blur_limit=(3, 5), p=1)
        self.HorizontalFlip = alb.HorizontalFlip(p=1)
        self.LightChange = alb.OneOf([
            alb.RandomBrightnessContrast(),
            alb.FancyPCA(),
            alb.HueSaturationValue()
        ],
                                     p=1)
        self.ShiftRotate = alb.ShiftScaleRotate(shift_limit=0.1,
                                                scale_limit=0.2,
                                                rotate_limit=10,
                                                border_mode=cv2.BORDER_CONSTANT,
                                                p=1)

    def _apply_aug(self, imgs, aug_method):
        # Apply one augmentation to every image of the group, in place.
        # NOTE(review): each image draws independent random parameters —
        # confirm group-consistent parameters are not required here.
        for i, img in enumerate(imgs):
            imgs[i] = aug_method(image=img)['image']
        return imgs

    def __call__(self, imgs):
        # img compress
        if np.random.random() < 0.3:
            imgs = self._apply_aug(imgs, self.ImageCompression)
        # gauss noise
        if np.random.random() < 0.1:
            imgs = self._apply_aug(imgs, self.GaussNoise)
        # gauss blur
        if np.random.random() < 0.05:
            imgs = self._apply_aug(imgs, self.GaussianBlur)
        # flip
        if np.random.random() < 0.5:
            imgs = self._apply_aug(imgs, self.HorizontalFlip)
        # light
        if np.random.random() < 0.5:
            imgs = self._apply_aug(imgs, self.LightChange)
        # shift rotate
        if np.random.random() < 0.5:
            imgs = self._apply_aug(imgs, self.ShiftRotate)
        return imgs


class GroupTestTransform(Identity):
    """Test-time group transform: identical to Identity (no augmentation)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)


def get_group_transform(mode):
    """Return the group transform matching ``mode`` ('train' or 'test')."""
    if mode == 'train':
        return GroupTrainTransform()
    elif mode == 'test':
        return GroupTestTransform()
    else:
        raise (NotImplementedError)


def isotropically_resize_image(img,
                               size,
                               interpolation_down=cv2.INTER_AREA,
                               interpolation_up=cv2.INTER_CUBIC):
    """Scale ``img`` so its longest side equals ``size`` (aspect preserved)."""
    h, w = img.shape[:2]
    if max(w, h) == size:
        return img
    if w > h:
        scale = size / w
        h = h * scale
        w = size
    else:
        scale = size / h
        w = w * scale
        h = size
    # AREA shrinks best; CUBIC/LINEAR enlarge best.
    interpolation = interpolation_up if scale > 1 else interpolation_down
    resized = cv2.resize(img, (int(w), int(h)), interpolation=interpolation)
    return resized


def get_transform(mode, size):
    """Return the per-image albumentations pipeline for ``mode``."""
    if mode == 'train':
        return get_train_transform(size)
    elif mode == 'test':
        return get_test_transform(size)
    else:
        raise (NotImplementedError)


def get_test_transform(size):
    return alb.Compose([
        IsotropicResize(max_side=size),
        alb.PadIfNeeded(min_height=size,
                        min_width=size,
                        border_mode=cv2.BORDER_CONSTANT),
    ])


def get_train_transform(size):
    return alb.Compose([
        # alb.GaussNoise(p=0.1),
        # alb.GaussianBlur(blur_limit=(3, 5), p=0.1),
        alb.HorizontalFlip(),
        alb.OneOf([
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_CUBIC),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_AREA,
                            interpolation_up=cv2.INTER_LINEAR),
            IsotropicResize(max_side=size,
                            interpolation_down=cv2.INTER_LINEAR,
                            interpolation_up=cv2.INTER_LINEAR),
        ],
                  p=1),
        alb.PadIfNeeded(min_height=size,
                        min_width=size,
                        border_mode=cv2.BORDER_CONSTANT),
        # alb.OneOf([
        #     alb.RandomBrightnessContrast(),
        #     alb.FancyPCA(),
        #     alb.HueSaturationValue()
        # ],
        #     p=0.5),
        # alb.ToGray(p=0.2),
        # alb.ShiftScaleRotate(shift_limit=0.1,
        #                      scale_limit=0.1,
        #                      rotate_limit=5,
        #                      border_mode=cv2.BORDER_CONSTANT,
        #                      p=0.5),
    ])


def scan_jpg_from_img_dir(img_dir):
    """List .jpg paths in ``img_dir`` sorted by the trailing frame number."""
    img_ps = [
        osp.join(img_dir, name)
        for name in sorted(os.listdir(img_dir),
                           key=lambda x: int(x.split('.')[0].split('_')[-1]))
        if '.jpg' in name  # !! sort key
    ]
    return img_ps
/main_cls.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Leave-one-subject-out training / evaluation driver for ME spotting.

Flow: parse args (or a built-in debug arg list) -> pin threading / GPU
env vars BEFORE importing torch -> build save dirs and logging -> loop
over subjects, training one model per held-out subject with optional
distributed training, AMP, resume and per-subject finetuning.
"""
from genericpath import exists
import os
from typing import Final
import cv2
import sys
from matplotlib.pyplot import xcorr
from numpy.random import f, sample, shuffle
from torch.utils.data import dataset

from config import parser

if len(sys.argv) > 1:  # use shell args
    args = parser.parse_args()
    print('Use shell args.')
else:
    # Debug: hard-coded arg list used when launched without CLI args.
    args_list = [
        '--dataset', 'SAMM', '--print-freq', '1', '--snap', 'debug',
        '--data_option', 'wt_diff', '--gpus', '0', '--batch_size', '2',
        '--input_size', '128', '--length', '64', '-L', '12', '--workers', '0',
    ]
    args = parser.parse_args(args_list)

# os setting — must happen before the heavy imports below so the thread
# limits and CUDA device mask actually take effect.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
if args.gpus is not None:
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

import re
import logging
import time
import torch
import os.path as osp
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.distributed as dist
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from datetime import datetime
from tqdm import tqdm
from pprint import pformat
from timm.utils import setup_default_logging, NativeScaler, reduce_tensor, distribute_bn
from timm.data.distributed_sampler import OrderedDistributedSampler
from contextlib import suppress
from model.network import Two_Stream_RNN_Cls, load_pretrained_model
from dataset.me_dataset import SAMMDataset, CASME_2Dataset
import utils
import trainer_cls as trainer

# torch.multiprocessing.set_start_method('spawn')
torch.backends.cudnn.benchmark = True

# check resume: a resume run reuses an existing results directory
RESUME = osp.exists(args.resume)
# check finetune: finetuning specific subjects requires a resume dir
if len(args.finetune_list) > 0:
    assert RESUME
    FINETUNE = True
else:
    FINETUNE = False

_logger = logging.getLogger('train')

# resume
if RESUME:
    setattr(args, 'save_root', 'results/{}'.format(osp.basename(args.resume)))
else:
    snapshot_name = '_'.join(
        [args.snap, datetime.now().strftime("%Y%m%d-%H%M%S")])
    if len(args.store_name) == 0:
        args.store_name = snapshot_name
    setattr(args, 'save_root', 'results/{}'.format(args.store_name))

# make dirs (rank 0 creates them; other ranks wait briefly)
if args.local_rank == 0:
    utils.check_rootfolders(args)
else:
    time.sleep(1)

# setup logging
setup_default_logging(
    log_path=os.path.join(args.save_root, args.root_log, 'run.log'))
_logger.info("save experiment to :{}".format(args.save_root))

# save args
if args.local_rank == 0:
    args_string = pformat(args.__dict__)
    _logger.info(args_string)

# reset random
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)

# if distributed: only stay distributed when the launcher exported WORLD_SIZE>1
if args.distributed and 'WORLD_SIZE' in os.environ:
    args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda'
args.world_size = 1
args.rank = 0  # global rank
if args.distributed:
    args.device = 'cuda:%d' % args.local_rank
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend='nccl', init_method='env://')
    args.world_size = dist.get_world_size()
    args.rank = dist.get_rank()
    _logger.info(
        'Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
        % (args.rank, args.world_size))
# else:
#     _logger.info('Training with a single process on 1 GPUs.')
assert args.rank >= 0
utils.synchronize()

# loss_fn
criterion = utils.Focal_Loss(alpha=args.focal_alpha)

# leave one subject out cross validation
img_dirs = utils.get_img_dirs(args.dataset)
img_dirs_dict = utils.leave_one_out(
    img_dirs, args.dataset)  # key -> [train_set, val_set]

# finetune and resume: reload previously-recorded per-subject results
if RESUME:
    total_MNA = np.load(osp.join(args.resume, args.root_output,
                                 'cross_validation_MNA_dict.npy'),
                        allow_pickle=True).item()
    match_regions_record_all = np.load(osp.join(
        args.resume, args.root_output, 'match_regions_record_all.npy'),
                                       allow_pickle=True).item()
    if not FINETUNE:
        keys1 = list(total_MNA.keys())
        # keys2 = list(match_regions_record_all.keys())
        rm_key = keys1[-1]  # after python 3.6, order is guaranteed
        if args.delete_last:
            # delete the last subject results (it may be half-trained)
            total_MNA, match_regions_record_all = utils.delete_records(
                total_MNA, match_regions_record_all, rm_key)
            if args.local_rank == 0:
                _logger.info('resume from subject {} (include)'.format(rm_key))
        elif args.local_rank == 0:
            _logger.info('resume from subject {} (not include)'.format(rm_key))
    else:
        if args.local_rank == 0:
            _logger.info('finetune subjects: [{}]'.format(','.join(
                args.finetune_list)))
else:
    total_MNA = {}  # store all cross-validation results
    match_regions_record_all = {}
utils.synchronize()

for vi, (val_id, [train_dirs, val_dirs]) in enumerate(img_dirs_dict.items()):
    # leave {val_id} out...
    # FINETUNE has higher priority than RESUME
    if FINETUNE and (val_id not in args.finetune_list):
        continue  # skip subjects that do not need finetune
    if RESUME and (not FINETUNE) and (val_id in total_MNA):
        continue  # skip from resume
    if val_id in args.finetune_list:
        # delete records so the finetuned subject is re-evaluated from scratch
        total_MNA, match_regions_record_all = utils.delete_records(
            total_MNA, match_regions_record_all, val_id)

    # Input channel count depends on the data option (see me_dataset).
    if args.data_option == 'diff':
        inchannel = args.L
    elif args.data_option == 'wt_diff':
        inchannel = 4 * args.L
    elif args.data_option == 'wt_dr':
        inchannel = (
            args.L + 1 - 11 +
            1) * 2 * 4  # gauss kernel size = 11, *2 = dr1,dr2, *4 = 4 bands

    # amp
    amp_autocast = suppress  # do nothing
    loss_scaler = None
    if args.amp:
        amp_autocast = torch.cuda.amp.autocast
        loss_scaler = NativeScaler()
        if args.local_rank == 0:
            _logger.info(
                'Using native Torch AMP. Training in mixed precision.')
    else:
        if args.local_rank == 0:
            _logger.info('AMP not enabled. Training in float32.')

    # model (fresh model per held-out subject)
    model = Two_Stream_RNN_Cls(mlp_hidden_units=args.hidden_units,
                               inchannel=inchannel,
                               outchannel=2)
    # load pretrained
    if osp.exists(args.load_pretrained):
        model = load_pretrained_model(model, args.load_pretrained,
                                      args.load_bn)
        if args.local_rank == 0:
            _logger.info('Load pretrained model from {}[load_bn: {}]'.format(
                args.load_pretrained, args.load_bn))
    # pytorch_total_params = sum(p.numel() for p in model.parameters()
    #                            if p.requires_grad)
    # print("Total Params: {}".format(pytorch_total_params))
    model = model.cuda()

    # setup synchronized BatchNorm for distributed training
    if args.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        # if args.local_rank == 0:
        #     _logger.info(
        #         'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
        #         'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.'
        #     )

    # optimizer
    if args.optim == 'SGD':
        optimizer = torch.optim.SGD(
            [p for p in model.parameters() if p.requires_grad],
            args.lr,
            momentum=args.momentum,
            weight_decay=args.weight_decay)
    elif args.optim == 'Adam':
        optimizer = torch.optim.Adam(
            [p for p in model.parameters() if p.requires_grad],
            args.lr,
            weight_decay=args.weight_decay)
    else:
        raise NotImplementedError

    # setup distributed training
    if args.distributed:
        model = DistributedDataParallel(model,
                                        device_ids=[args.local_rank],
                                        find_unused_parameters=True)
    else:
        model = DataParallel(model).cuda()

    # dataset
    Dataset = SAMMDataset if args.dataset == 'SAMM' else CASME_2Dataset

    def create_dataset():
        # Validation uses step == length so evaluation windows never overlap.
        train_dataset = Dataset(
            mode='train',
            img_dirs=train_dirs,
            seq_len=args.length,
            step=args.step,
            # step=1000,  # !!
            time_len=args.L,
            input_size=args.input_size,
            data_aug=args.data_aug,
            data_option=args.data_option)
        val_dataset = Dataset(
            mode='test',
            img_dirs=val_dirs,
            seq_len=args.length,
            step=args.length,  # assert no overlap
            # step=1000,  # !!
            time_len=args.L,
            input_size=args.input_size,
            data_aug=False,
            data_option=args.data_option)
        return train_dataset, val_dataset

    train_dataset, val_dataset = create_dataset()

    if args.distributed:
        val_sampler = OrderedDistributedSampler(val_dataset)
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        val_sampler = None
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               shuffle=train_sampler is None,
                                               sampler=train_sampler,
                                               batch_size=args.batch_size,
                                               drop_last=False,
                                               num_workers=args.workers,
                                               pin_memory=False)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             sampler=val_sampler,
                                             num_workers=0,
                                             pin_memory=False,
                                             drop_last=False)

    if args.local_rank == 0:
        _logger.info('<' * 10 + ' {} '.format(val_id) + '<' * 10)

    best_f_score = -1000.0
    best_loss = 1000.0
    val_accum_epochs = 0  # epochs since the score last improved (early stop)
    for epoch in range(args.epochs):
        if train_sampler is not None:
            train_sampler.set_epoch(epoch)
        utils.adjust_learning_rate(optimizer, epoch, args.lr,
                                   args.weight_decay, args.lr_steps,
                                   args.lr_decay_factor)
        trainer.train(train_loader, model, criterion, optimizer, epoch,
                      _logger, args, amp_autocast, loss_scaler)
        utils.synchronize()
        # bn syn
        if args.distributed:
            if args.local_rank == 0:
                _logger.info("Distributing BatchNorm running means and vars")
            distribute_bn(model, args.world_size,
                          True)  # true for reduce, false for broadcast
        # logging / periodic evaluation
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            loss_val, pred_and_gt = trainer.validate(val_loader, model,
                                                     criterion, _logger, args,
                                                     amp_autocast)
            # distributed synchronize
            pred_and_gt = utils.synchronize_pred_and_gt(pred_and_gt, epoch,
                                                        args)
            # eval: only rank 0 computes the real metrics; other ranks use
            # sentinels that are overwritten by the synchronize calls below
            if args.local_rank == 0:
                precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(
                    pred_and_gt, val_id, epoch, args)
            else:
                f_score = -10.0
                MNA = (0, 0, 0)
                # precision, recall, f_score, MNA, match_regions_record = utils.evaluate_bi_labels(
                #     pred_and_gt, val_id, epoch, args)
            utils.synchronize()
            # synchronize
            f_score = utils.synchronize_f_score(f_score, args)
            _logger.info('f_score of processor {}: {:.4f}'.format(
                args.local_rank, f_score))
            MNA = utils.synchronize_list(MNA, args)
            _logger.info('MNA of processor {}: {}'.format(
                args.local_rank, MNA))
            is_equal_score = f_score == best_f_score
            # NOTE(review): is_best_loss is computed but never used below.
            is_best_loss = loss_val < best_loss
            best_loss = min(loss_val, best_loss)
            is_best_score = f_score > best_f_score
            best_f_score = max(best_f_score, f_score)
            # save checkpoint
            if args.local_rank == 0:
                _logger.info(
                    'Test[{}]: loss_val: {:.4f} (best: {:.4f}), f-score: {:.4f} (best: {:.4f})'
                    .format(epoch, loss_val, best_loss, f_score,
                            best_f_score))
                utils.save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                    },
                    is_best_score,
                    args.save_root,
                    args.root_model,
                    filename=val_id)
            utils.synchronize()
            # Accept the epoch as the new best on a strictly better score,
            # or on an equal score with fewer misses (MNA[1]).
            if is_best_score or (is_equal_score and
                                 MNA[1] < total_MNA.get(val_id,
                                                        [0, 0, 0])[1]):
                val_accum_epochs = 0
                total_MNA.update(
                    {val_id:
                     MNA})  # processor 0 need this record for branch selection
                if args.local_rank == 0:
                    match_regions_record_all.update(
                        match_regions_record
                    )  # only processor 0 need this record
                    out_dir = osp.join(args.save_root, args.root_output,
                                       val_id)
                    os.makedirs(out_dir, exist_ok=True)
                    np.save(osp.join(out_dir, 'match_regions_record_best.npy'),
                            match_regions_record)
                    # all
                    np.save(
                        osp.join(args.save_root, args.root_output,
                                 'cross_validation_MNA_dict.npy'), total_MNA)
                    np.save(
                        osp.join(args.save_root, args.root_output,
                                 'match_regions_record_all.npy'),
                        match_regions_record_all)
                    precision, recall, f_score = utils.calculate_metric_from_dict_MNA(
                        total_MNA)
                    _logger.info(
                        'Test[all] Avg f-score now: {:.4f}'.format(f_score))
                utils.synchronize()
            else:
                val_accum_epochs += 1
            if val_accum_epochs >= args.early_stop:
                _logger.info(
                    "validation ccc did not improve over {} epochs, stop processor {}"
                    .format(args.early_stop, args.local_rank))
                break

    if args.local_rank == 0:
        precision_all, recall_all, f_score_all = utils.calculate_metric_from_dict_MNA(
            total_MNA)
        _logger.critical(
            '[{}][{}]/[{}] f_score: {:.4f}, precision_all: {:.4f}, recall_all: {:.4f}, f_score_all: {:.4f}'
            .format(val_id, vi + 1, len(img_dirs_dict), best_f_score,
                    precision_all, recall_all, f_score_all))

# store results
if args.local_rank == 0:
    np.save(
        osp.join(args.save_root, args.root_output,
                 'cross_validation_MNA_dict.npy'), total_MNA)
    np.save(
        osp.join(args.save_root, args.root_output,
                 'match_regions_record_all.npy'), match_regions_record_all)

_logger.info('ALL DONE')
exit()
/model/network.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import glob
import os
import os.path as osp
from torch.serialization import load


class MLP(nn.Module):
    """Frame-wise MLP applied to per-frame feature vectors.

    Each stage is Dropout -> Linear -> BatchNorm1d -> ReLU. The last
    entry of ``hidden_units`` is asserted to be 256 so the output can be
    concatenated with the temporal stream downstream.
    """

    def __init__(self, hidden_units, dropout=0.3):
        super(MLP, self).__init__()
        input_feature_dim = hidden_units[0]
        num_layers = len(hidden_units) - 1
        assert num_layers > 0
        assert hidden_units[-1] == 256
        fc_list = []
        for hidden_dim in hidden_units[1:]:
            fc_list += [
                nn.Dropout(dropout),
                nn.Linear(input_feature_dim, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
                nn.ReLU(inplace=True)
            ]
            input_feature_dim = hidden_dim
        self.mlp = nn.Sequential(*fc_list)

    def forward(self, input_tensor):
        # input_tensor: (bs, num_frames, feature_dim). Frames are folded
        # into the batch dim so BatchNorm1d receives 2-D input.
        bs, num_frames, feature_dim = input_tensor.size()
        input_tensor = input_tensor.reshape(bs * num_frames, feature_dim)
        out = self.mlp(input_tensor)
        return out.reshape(bs, num_frames, -1)


class Temporal_Net(nn.Module):
    """Four-stage 2-D conv net applied frame-wise to temporal input maps.

    With ``feature=True`` the 256-d feature before ``self.classifier``
    is returned; otherwise the classifier output is returned.
    """

    def __init__(self, input_size, num_channels, hidden_units, dropout,
                 feature):
        super().__init__()
        assert input_size in [112, 128, 224, 256]
        self.feature = feature  # return feature before classification
        # 4 layers conv net: channels 64 -> 128 -> 256 -> 512, stride-2 each
        self.conv_net = []
        self.conv_net.append(
            self._make_conv_layer(num_channels, 2**6, stride=2))
        for i in range(7, 10):
            self.conv_net.append(
                self._make_conv_layer(2**(i - 1), 2**i, stride=2))
        self.conv_net = nn.Sequential(*self.conv_net)
        last_conv_width = input_size // (2**4)
        last_conv_dim = 2**9
        self.dropout = nn.Dropout2d(p=0.2)
        # self.avgpool = nn.AvgPool2d(
        #     kernel_size=[last_conv_width, last_conv_width])
        fc_list = []
        fc_list += [
            nn.Linear(last_conv_dim, hidden_units[0]),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(hidden_units[0]),
            nn.Dropout(dropout)
        ]
        for i in range(0, len(hidden_units) - 2):
            fc_list += [
                nn.Linear(hidden_units[i], hidden_units[i + 1]),
                nn.ReLU(inplace=True),
                nn.BatchNorm1d(hidden_units[i + 1]),
                nn.Dropout(dropout)
            ]
        self.fc = nn.Sequential(*fc_list)
        # not used (only reached when feature=False)
        final_norm = nn.BatchNorm1d(1, eps=1e-6, momentum=0.1)
        self.classifier = nn.Sequential(
            nn.Linear(hidden_units[-2], hidden_units[-1]), final_norm)

    def _make_conv_layer(self, in_c, out_c, kernel_size=3, stride=2):
        # Two conv+BN+ReLU sub-blocks; the second conv downsamples.
        ks = kernel_size
        conv_layer = nn.Sequential(
            nn.Conv2d(in_c, out_c, kernel_size=(ks, ks), padding=ks // 2),
            nn.BatchNorm2d(out_c,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_c,
                      out_c,
                      kernel_size=(ks, ks),
                      padding=ks // 2,
                      stride=stride),
            nn.BatchNorm2d(out_c,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.ReLU(inplace=True),
        )
        return conv_layer

    def forward(self, wt_data):
        # wt_data: (bs, num_frames, num_channel, W0, H0)
        bs, num_frames, num_channel, W0, H0 = wt_data.size()
        wt_data = wt_data.reshape(bs * num_frames, num_channel, W0, H0)
        conv_out = self.conv_net(wt_data)
        avgpool = F.adaptive_avg_pool2d(conv_out, (1, 1))
        # avgpool = self.avgpool(conv_out)
        avgpool = avgpool.reshape(bs * num_frames, -1)
        out = self.fc(avgpool)
        if self.feature:
            return out
        else:
            out = self.classifier(out)
            return out


class Two_Stream_RNN(nn.Module):
    """Two-stream model: spatial MLP + temporal CNN fused by a bi-GRU.

    The two 256-d streams are concatenated (512-d), projected back to
    256-d, fed through a 2-layer bidirectional GRU and classified
    per frame.
    """

    def __init__(self,
                 mlp_hidden_units=[2048, 256, 256],
                 dropout=0.3,
                 inchannel=12,
                 size=256,
                 outchannel=4):
        super().__init__()
        self.mlp = MLP(mlp_hidden_units)
        self.temporal_net = Temporal_Net(size,
                                         inchannel,
                                         hidden_units=[256, 256, 1],
                                         dropout=0.3,
                                         feature=True)
        self.transform = nn.Sequential(nn.Linear(512, 256),
                                       nn.ReLU(inplace=True),
                                       nn.BatchNorm1d(256),
                                       nn.Dropout(dropout))
        self.rnns = nn.GRU(256,
                           128,
                           bidirectional=True,
                           num_layers=2,
                           dropout=0.3,
                           batch_first=True)
        self.classifier = nn.Sequential(nn.Dropout(dropout),
                                        nn.Linear(256, outchannel),
                                        nn.BatchNorm1d(outchannel), nn.ReLU())
        _init_weights(self)

    def forward(self, temp_data, rgb_data, return_feature=False):
        bs, num_frames = rgb_data.size(0), rgb_data.size(1)
        # spatial features
        features_cnn = self.mlp(rgb_data)
        features_spatial = features_cnn.reshape(bs, num_frames, -1)
        # temporal features
        features_temporal = self.temporal_net(temp_data)
        features_temporal = features_temporal.reshape(bs, num_frames, -1)
        features = torch.cat([features_spatial, features_temporal], dim=-1)
        features = self.transform(features.reshape(bs * num_frames, -1))
        features = features.reshape(bs, num_frames, -1)
        # rnn combination
        outputs_rnns, _ = self.rnns(features)
        outputs_rnns = outputs_rnns.reshape(bs * num_frames, -1)
        out = self.classifier(outputs_rnns)
        out = out.reshape(bs, num_frames, -1)
        if return_feature:
            return out
        # anno transforms: channel 0 is the intensity regression head;
        # log(1 + x) compresses its range.
        out[..., 0] = torch.log(out[..., 0] + 1)
        return out


class Two_Stream_RNN_Cls(Two_Stream_RNN):
    """Classification variant: replaces the regression classifier head
    with a plain Dropout+Linear and skips the anno transform."""

    def __init__(self,
                 mlp_hidden_units=[2048, 256, 256],
                 dropout=0.3,
                 inchannel=12,
                 size=256,
                 outchannel=2):
        super().__init__(mlp_hidden_units=mlp_hidden_units,
                         dropout=dropout,
                         inchannel=inchannel,
                         size=size,
                         outchannel=outchannel)
        self.classifier = nn.Sequential(nn.Dropout(dropout),
                                        nn.Linear(256, outchannel))
        _init_weights(self)

    def forward(self, temp_data, rgb_data):
        # return_feature=True makes the parent return raw logits
        # without the log intensity transform.
        out = super().forward(temp_data, rgb_data, return_feature=True)
        return out


class ResNet50_Cls(nn.Module):
    """Small classification head on top of precomputed 2048-d
    ResNet-50 features."""

    def __init__(self, num_class=2):
        super().__init__()
        self.fc = nn.Sequential(nn.Linear(2048, 512), nn.Dropout(0.5),
                                nn.Linear(512, num_class))

    def forward(self, x):
        assert x.shape[-1] == 2048
        x = self.fc(x)
        return x


def _init_weights(model):
    """Initialize conv (Kaiming), batch-norm (1/0) and linear (Xavier)
    submodules of ``model`` in place."""
    for k, m in model.named_modules():
        if isinstance(m, (nn.Conv3d, nn.Conv2d, nn.Conv1d)):
            nn.init.kaiming_normal_(m.weight,
                                    mode='fan_out',
                                    nonlinearity='relu')
            # nn.init.xavier_normal_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.BatchNorm3d, nn.BatchNorm2d, nn.BatchNorm1d)):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.Linear)):
            nn.init.xavier_normal_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)


def load_pretrained_model(model, path, load_bn):
    """Load a pretrained checkpoint into ``model``.

    Renames ``wt_net`` -> ``temporal_net`` keys, optionally drops all
    batch-norm statistics/affine params (``load_bn=False``), always
    drops classifier weights, and loads non-strictly by merging into
    the model's current state dict.
    """
    model_dict = model.state_dict()
    state_dict = torch.load(path, map_location='cpu')['state_dict']
    state_dict = {
        k.replace('wt_net', 'temporal_net', 1): v
        for k, v in state_dict.items()
    }
    # bn filter: collect every param of each BN layer (identified via
    # its running_mean key) and drop them from the checkpoint
    if not load_bn:
        bn_keys = []
        for k in state_dict.keys():
            if 'running_mean' in k:
                bn_name = '.'.join(k.split('.')[:-1])
                for name in [
                        'weight', 'bias', 'running_mean', 'running_var',
                        'num_batches_tracked'
                ]:
                    bn_keys.append(bn_name + '.' + name)
        state_dict = {k: v for k, v in state_dict.items() if k not in bn_keys}
    # # module name rank adjust
    # for k, v in state_dict.items():
    #     if 'mlp.mlp.5' in k:
    #         state_dict[k.replace('mlp.mlp.5', 'mlp.mlp.4')] = v
    #         del state_dict[k]
    #     if 'temporal_net.fc.4' in k:
    #         state_dict[k.replace('temporal_net.fc.4',
    #                              'temporal_net.fc.3')] = v
    #         del state_dict[k]
    # classifier filter
    state_dict = {k: v for k, v in state_dict.items() if 'classifier' not in k}
    model_dict.update(state_dict)
    model.load_state_dict(model_dict)
    return model
/model/utils.py
import torch.nn as nn


def init_weights(model):
    """Apply standard initialization to every submodule of ``model``.

    Conv2d/Conv3d weights get Kaiming-normal (fan-out, ReLU);
    BatchNorm2d/BatchNorm3d layers are reset to weight=1, bias=0;
    Linear weights get Xavier-normal. Any existing bias is zeroed.
    Modifies ``model`` in place.
    """
    for _, module in model.named_modules():
        if isinstance(module, (nn.Conv3d, nn.Conv2d)):
            nn.init.kaiming_normal_(module.weight,
                                    mode='fan_out',
                                    nonlinearity='relu')
            # nn.init.xavier_normal_(module.weight)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, (nn.BatchNorm3d, nn.BatchNorm2d)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.xavier_normal_(module.weight)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
/paths.py
# SAMM SAMM_ROOT = '/data/gjz_mm21/SAMM' SAMM_LABEL_DIR = SAMM_ROOT SAMM_VIDEO_DIR = '/data/gjz_mm21/SAMM/SAMM_longvideos' # CASME_2 CASME_2_ROOT = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped' CASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels' CASME_2_VIDEO_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/longVideoFaceCropped'
/preprocess/CNN_feature_extraction.py
from __future__ import division
from typing import Iterable
import cv2
import os
import time
import six
import sys
from tqdm import tqdm
import argparse
import pickle
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.utils.data
import os.path as osp
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from glob import glob
import numbers
from PIL import Image, ImageOps
import random
import params

# for torch lower version: provide _rebuild_tensor_v2 on old PyTorch
# so checkpoints serialized by newer versions can still be loaded
import torch._utils
from torch.nn import functional as F

try:
    torch._utils._rebuild_tensor_v2
except AttributeError:

    def _rebuild_tensor_v2(storage, storage_offset, size, stride,
                           requires_grad, backward_hooks):
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size,
                                              stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor

    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2

# command-line namespace, assigned in the __main__ block and read by
# get_vec/predict/main
global parsed

import torch.utils.data as data

# multi thread setting: pin BLAS/OpenCV to one thread per worker
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)


class SAMMDataset(data.Dataset):
    """All jpg frames under ``data_root/*/``; yields (image, path)."""

    def __init__(self, data_root, transform=None):
        super().__init__()
        self.img_ps = glob(osp.join(data_root, '*/*.jpg'))
        self.transform = transform

    def __len__(self):
        return len(self.img_ps)

    def __getitem__(self, index):
        img = Image.open(self.img_ps[index]).convert('RGB')
        img = self.transform(img) if self.transform is not None else img
        return img, self.img_ps[index]


class CASME_2Dataset(SAMMDataset):
    """Same as SAMMDataset but frames sit one directory level deeper."""

    def __init__(self, data_root, transform=None):
        super().__init__(data_root, transform)
        self.img_ps = glob(osp.join(data_root, '*/*/*.jpg'))


def load_module_2or3(model_name, model_def_path):
    """Load model definition module in a manner that is compatible with
    both Python2 and Python3

    Args:
        model_name: The name of the model to be loaded
        model_def_path: The filepath of the module containing the definition

    Return:
        The loaded python module."""
    if six.PY3:
        import importlib.util
        spec = importlib.util.spec_from_file_location(model_name,
                                                      model_def_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
    else:
        import importlib
        dirname = os.path.dirname(model_def_path)
        sys.path.insert(0, dirname)
        module_name = os.path.splitext(os.path.basename(model_def_path))[0]
        mod = importlib.import_module(module_name)
    return mod


def load_model(model_name, MODEL_DIR):
    """Load imoprted PyTorch model by name

    Args:
        model_name (str): the name of the model to be loaded

    Return:
        nn.Module: the loaded network
    """
    model_def_path = osp.join(MODEL_DIR, model_name + '.py')
    weights_path = osp.join(MODEL_DIR, model_name + '.pth')
    mod = load_module_2or3(model_name, model_def_path)
    func = getattr(mod, model_name)
    net = func(weights_path=weights_path)
    return net


def compose_transforms(meta,
                       resize=256,
                       center_crop=True,
                       override_meta_imsize=False):
    """Compose preprocessing transforms for model

    The imported models use a range of different preprocessing options,
    depending on how they were originally trained. Models trained in
    MatConvNet typically require input images that have been scaled to
    [0,255], rather than the [0,1] range favoured by PyTorch.

    Args:
        meta (dict): model preprocessing requirements
        resize (int) [256]: resize the input image to this size
        center_crop (bool) [True]: whether to center crop the image
        override_meta_imsize (bool) [False]: if true, use the value of
            `resize` to select the image input size, rather than the
            properties contained in meta (this option only applies when
            center cropping is not used.

    Return:
        (transforms.Compose): Composition of preprocessing transforms
    """
    normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])
    im_size = meta['imageSize']
    assert im_size[0] == im_size[1], 'expected square image size'
    if center_crop:
        transform_list = [
            transforms.Resize(resize),
            transforms.CenterCrop(size=(im_size[0], im_size[1]))
        ]
    else:
        if override_meta_imsize:
            im_size = (resize, resize)
        transform_list = [transforms.Resize(size=(im_size[0], im_size[1]))]
    transform_list += [transforms.ToTensor()]
    if meta['std'] == [1, 1, 1]:  # common amongst mcn models
        transform_list += [lambda x: x * 255.0]
    transform_list.append(normalize)
    return transforms.Compose(transform_list)


def augment_transforms(meta,
                       resize=256,
                       random_crop=True,
                       override_meta_imsize=False):
    """Like compose_transforms but with a shared random crop/flip so all
    frames of a clip receive the same augmentation parameters."""
    normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])
    im_size = meta['imageSize']
    assert im_size[0] == im_size[1], 'expected square image size'
    if random_crop:
        v = random.random()
        transform_list = [
            transforms.Resize(resize),
            RandomCrop(im_size[0], v),
            RandomHorizontalFlip(v)
        ]
    else:
        if override_meta_imsize:
            im_size = (resize, resize)
        transform_list = [transforms.Resize(size=(im_size[0], im_size[1]))]
    transform_list += [transforms.ToTensor()]
    if meta['std'] == [1, 1, 1]:  # common amongst mcn models
        transform_list += [lambda x: x * 255.0]
    transform_list.append(normalize)
    return transforms.Compose(transform_list)


class RandomCrop(object):
    """Crop at a fixed relative position ``v`` so the same crop can be
    applied to every image in a group."""

    def __init__(self, size, v):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.v = v

    def __call__(self, img):
        w, h = img.size
        th, tw = self.size
        x1 = int((w - tw) * self.v)
        y1 = int((h - th) * self.v)
        #print("print x, y:", x1, y1)
        assert (img.size[0] == w and img.size[1] == h)
        if w == tw and h == th:
            out_image = img
        else:
            out_image = img.crop(
                (x1, y1, x1 + tw, y1 + th
                 ))  #same cropping method for all images in the same group
        return out_image


class RandomHorizontalFlip(object):
    """Randomly horizontally flips the given PIL.Image with a probability of 0.5
    """

    def __init__(self, v):
        self.v = v
        return

    def __call__(self, img):
        if self.v < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            #print ("horiontal flip: ",self.v)
        return img


def get_vec(model, layer_name, image):
    """Capture the activation of ``layer_name`` for ``image`` via a
    forward hook; shapes are hard-coded per supported layer name.
    Reads the module-global ``parsed`` for the requested layer."""
    bs = image.size(0)
    if parsed.layer_name == 'pool5_full':
        layer_name = 'pool5'
    layer = model._modules.get(layer_name)
    if parsed.layer_name == 'fc7':
        layer_output_size = 4096
        my_embedding = torch.zeros(bs, layer_output_size)
    elif parsed.layer_name == 'fc8':
        my_embedding = torch.zeros(bs, 7)
    elif parsed.layer_name == 'pool5' or parsed.layer_name == 'pool5_full':
        my_embedding = torch.zeros([bs, 512, 7, 7])
    elif parsed.layer_name == 'pool4':
        my_embedding = torch.zeros([bs, 512, 14, 14])
    elif parsed.layer_name == 'pool3':
        my_embedding = torch.zeros([bs, 256, 28, 28])
    elif parsed.layer_name == 'pool5_7x7_s1':
        my_embedding = torch.zeros([bs, 2048, 1, 1])
    elif parsed.layer_name == 'conv5_3_3x3_relu':
        my_embedding = torch.zeros([bs, 512, 7, 7])

    def copy_data(m, i, o):
        # hook: copy the layer's output into the preallocated buffer
        my_embedding.copy_(o.data)

    h = layer.register_forward_hook(copy_data)
    h_x = model(image)
    h.remove()
    if parsed.layer_name == 'pool5' or parsed.layer_name == 'conv5_3_3x3_relu':
        GAP_layer = nn.AvgPool2d(kernel_size=[7, 7], stride=(1, 1))
        my_embedding = GAP_layer(my_embedding)
    return F.relu(my_embedding.squeeze())


def get_frame_index(frame_path):
    """Parse the trailing integer of a frame filename, e.g. img_12.jpg -> 12."""
    frame_name = frame_path.split('/')[-1]
    frame_num = int(frame_name.split('.')[0].split('_')[-1])
    return frame_num


def predict(data_loader, layer_name, model, des_dir):
    """Extract features for every image in ``data_loader`` and save a
    .npy next to each source jpg (``des_dir`` is currently unused)."""
    with torch.no_grad():
        for ims, img_path in tqdm(data_loader):
            ims = ims.cuda()
            output = get_vec(model, layer_name, ims)
            if not len(output.shape) == 2:
                # squeeze() collapsed the batch dim (batch of 1); rewrap
                output = [
                    output,
                ]
                img_path = [
                    img_path,
                ]
            for feature, path in zip(output, img_path):
                basename = osp.basename(path)
                des_basename = basename.split('.')[0] + '.npy'
                des_path = path.replace(basename, des_basename)
                np.save(des_path, feature)


def feature_extraction(model, loader, des_dir):
    model.eval()
    predict(loader, parsed.layer_name, model, des_dir)


def main():
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    MODEL_DIR = params.MODEL_DIR
    model_name = 'resnet50_ferplus_dag'
    model = load_model(model_name, MODEL_DIR)
    model = model.cuda()
    meta = model.meta
    preproc_transforms = compose_transforms(
        meta, center_crop=False) if not parsed.augment else augment_transforms(
            meta, random_crop=True)
    if parsed.dataset == 'SAMM':
        dataset = SAMMDataset(params.SAMM_VIDEO_DIR, preproc_transforms)
        # parsed.save_root = params.SAMM_FEATURE_DIR
    elif parsed.dataset == 'CASME_2':
        dataset = CASME_2Dataset(params.CASME_2_VIDEO_DIR, preproc_transforms)
        # parsed.save_root = params.CASME_2_FEATURE_DIR
    else:
        raise NotImplementedError
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=4,
                                              num_workers=0,
                                              pin_memory=False)
    des_dir = None
    # des_dir = osp.join(
    #     parsed.save_root, '_'.join([
    #         '{}_features'.format(model_name), 'fps=' + str(parsed.fps),
    #         parsed.layer_name
    #     ]))
    # os.makedirs(des_dir, exist_ok=True)
    feature_extraction(model, data_loader, des_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run.')
    parser.add_argument('--refresh',
                        dest='refresh',
                        action='store_true',
                        help='refresh feature cache')
    parser.add_argument('--fps',
                        type=int,
                        default=0,
                        help='frames per second to extract')
    parser.add_argument('--layer_name', type=str, default='pool5_7x7_s1')
    parser.add_argument(
        '--augment',
        action="store_true",
        help='whether to extract augmented features for train set only ')
    parser.add_argument('--dataset', type=str, default='')
    parsed = parser.parse_args()
    # NOTE(review): this overrides whatever --dataset was passed on the
    # command line — confirm this hard-coding is intentional.
    parsed.dataset = 'SAMM'
    main()
/preprocess/casme_2_label_generation.py
''' generate the emotion intensity of each frame ''' # %% import pdb import os import os.path as osp from numpy.core.numeric import ones, ones_like from numpy.lib.function_base import percentile import pandas as pd import numpy as np import matplotlib.pyplot as plt import params # %% ID2NAME and NAME2ID # CASME_2_PID2NAME/NAME2PID df = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'naming_rule1.csv')) data = df.values CASME_2_PID2NAME = {str(line[-1]): str(line[1]) for line in data} CASME_2_NAME2PID = {str(line[1]): str(line[-1]) for line in data} del df del data # CASME_2_VID2NAME df = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'naming_rule2.csv')) data = df.values CASME_2_VID2NAME = {'{:04d}'.format(line[0]): str(line[1]) for line in data} CASME_2_NAME2VID = {str(line[1]): '{:04d}'.format(line[0]) for line in data} del df del data save_dict_dir = osp.join(params.CASME_2_ROOT, 'ID2NAME2ID') os.makedirs(save_dict_dir, exist_ok=True) for p, d in zip( ['pid2name', 'name2pid', 'vid2name', 'name2vid'], [CASME_2_PID2NAME, CASME_2_NAME2PID, CASME_2_VID2NAME, CASME_2_NAME2VID]): np.save(osp.join(save_dict_dir, p + '.npy'), d) # %% main anno_dict = {} label_dict = {} # 0: none, 1: macro, 2: micro pred_gt = {} # [[onset, offset, label],...] 
bi_label_dict = {} # store all the img_ps fall into the spotting interval df = pd.read_csv(osp.join(params.CASME_2_LABEL_DIR, 'CASFEcode_final.csv')) data = df.values for row in data: # construct imgs dir for current row data pid = str(row[0]) vname = row[1].split('_')[0] pname = CASME_2_PID2NAME[pid] vid = CASME_2_NAME2VID[vname] name_code = pname[1:] imgs_file_head = name_code + '_' + vid for file_name in os.listdir(osp.join(params.CASME_2_VIDEO_DIR, pname)): if file_name.startswith(imgs_file_head): imgs_dir = osp.join(params.CASME_2_VIDEO_DIR, pname, file_name) break # update emotion intensity and label imgs_name = [ name for name in sorted(os.listdir(imgs_dir), key=lambda x: int(x.split('.')[0].split('_')[-1])) if '.jpg' in name ] # first img name: img_1.jpg onset, apex, offset = row[2:2 + 3] onset, apex, offset = int(onset), int(apex), int(offset) if onset > 0 and apex > 0 and offset > 0: pass elif onset > 0 and apex > 0 and offset == 0: offset = min(len(imgs_name), apex + (apex - onset)) elif onset > 0 and apex == 0 and offset > 0: apex = (onset + offset) // 2 else: raise Exception try: assert onset < apex and apex < offset except: print('[Error][{}] onset: {}, apex: {}, offset: {}, '.format( imgs_dir, onset, apex, offset)) continue # skip this row if not imgs_dir in anno_dict: anno_dict[imgs_dir] = np.zeros(len(imgs_name)) label_dict[imgs_dir] = np.zeros(len(imgs_name)) pred_gt[imgs_dir] = [] bi_label_dict[imgs_dir] = [] # convert start index from 1 to 0 onset -= 1 apex -= 1 offset -= 1 # intensity sigma = min(offset - apex, apex - onset) // 2 mu = apex func = lambda x: np.exp(-(x - mu)**2 / 2 / sigma / sigma ) / sigma / np.sqrt(2 * np.pi) # func = lambda x: (x - onset) / (apex - onset) if x >= apex else ( # offset - x) / (offset - apex) cumsum = 0 for i in range(onset, offset + 1): anno_dict[imgs_dir][i] += func(i) cumsum += anno_dict[imgs_dir][i] if cumsum < 0: pdb.set_trace() # print('onset2offset cumsum: {:.2f}'.format(cumsum)) # label 
label_dict[imgs_dir][onset:offset + 1] = 1 if 'macro' in str(row[-2]).lower() else 2 # pred_gt pred_gt[imgs_dir].append( [onset, offset + 1, 1 if 'macro' in str(row[-2]).lower() else 2]) # bi_label bi_label_dict[imgs_dir].extend( [osp.join(imgs_dir, name) for name in imgs_name[onset:offset + 1]]) np.save(osp.join(params.CASME_2_LABEL_DIR, 'anno_dict.npy'), anno_dict) np.save(osp.join(params.CASME_2_LABEL_DIR, 'label_dict.npy'), label_dict) np.save(osp.join(params.CASME_2_LABEL_DIR, 'pred_gt.npy'), pred_gt) np.save(osp.join(params.CASME_2_LABEL_DIR, 'bi_label.npy'), bi_label_dict) # %% visulization # fig = plt.figure(figsize=(30, 50)) # for i, (k, v) in enumerate(anno_dict.items()): # fig.add_subplot((len(anno_dict) - 1) // 5 + 1, 5, i + 1) # plt.plot(v) # fig.tight_layout() # plt.savefig('./CASME_2_annos.pdf') # plt.show() column = 5 fig = plt.figure(figsize=(30, ((len(label_dict) - 1) // column + 1) * 2)) for i, (k, v) in enumerate(label_dict.items()): v[v > 0] = 1 # 1,2 -> 1 fig.add_subplot((len(label_dict) - 1) // column + 1, column, i + 1) plt.plot(v, 'r-') plt.title(osp.basename(k)) fig.tight_layout() out_dir = './preprocess' plt.savefig(osp.join(out_dir, 'ca_bi_label.pdf')) plt.close('all') # %%
/preprocess/openface/face_crop_align.py
# Run OpenFace face crop/alignment over every video under video_root.
import os
import os.path as osp
from tqdm import tqdm
from glob import glob
from video_processor import Video_Processor
import params

# OpenFace parameters
# NOTE(review): OpenFace_exe / video_root are read from params but are
# not defined in the visible preprocess/params.py — confirm they exist.
save_size = 224
OpenFace_exe = params.OpenFace_exe
quiet = True
nomask = True
grey = False
tracked_vid = False
noface_save = False

# dataset
video_root = params.video_root

# main: each video gets a sibling "<name>_opface" output directory
video_processor = Video_Processor(save_size, nomask, grey, quiet, tracked_vid,
                                  noface_save, OpenFace_exe)
video_ps = list(glob(osp.join(video_root, '*/*mp4')))
video_ps.extend(list(glob(osp.join(video_root, '*/*avi'))))
for video_p in tqdm(video_ps):
    video_name = os.path.basename(video_p).split('.')[0]
    opface_output_dir = os.path.join(os.path.dirname(video_p),
                                     video_name + "_opface")
    video_processor.process(video_p, opface_output_dir)
/preprocess/params.py
# CASME_2 CASME_2_ROOT = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped' CASME_2_LABEL_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/labels' CASME_2_VIDEO_DIR = '/data/gjz_mm21/CASME_2_LongVideoFaceCropped/CASME_2_longVideoFaceCropped/longVideoFaceCropped' # SAMM SAMM_ROOT = '/data/gjz_mm21/SAMM' SAMM_VIDEO_DIR = '/data/gjz_mm21/SAMM/SAMM_longvideos' # resnet50 features MODEL_DIR = '/home/gjz/fmr_backbone/pytorch-benchmarks/ferplus'
/preprocess/samm_2_label_generation.py
'''
generate the emotion intensity of each frame
'''
# %%
import os
import pdb
import os.path as osp
from numpy.core.numeric import ones
from numpy.lib.function_base import percentile
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import params

# %% main
anno_dict = {}  # intensity
label_dict = {}  # 0: none, 1: macro, 2: micro
pred_gt = {}  # [[onset, offset, label],...]
bi_label_dict = {}  # store all the img_ps fall into the spotting interval
df = pd.read_csv(osp.join(params.SAMM_ROOT, 'SAMM_labels.csv'))
data = df.values
for row in data:
    # construct imgs dir for current row data
    file_name = row[1][:5]
    imgs_dir = osp.join(params.SAMM_VIDEO_DIR, file_name)
    assert osp.exists(imgs_dir)
    # update emotion intensity and label
    imgs_name = [
        name for name in sorted(
            os.listdir(imgs_dir),
            key=lambda x: int(x.split('.')[0].split('_')[-1]))
        if '.jpg' in name
    ]  # first img name: xxx_x_0001.jpg
    onset, apex, offset = row[3:3 + 3]
    onset, apex, offset = int(onset), int(apex), int(offset)
    # fill in a missing offset/apex (encoded as -1 in SAMM, unlike
    # CASME_2 which uses 0)
    if onset > 0 and apex > 0 and offset > 0:
        pass
    elif onset > 0 and apex > 0 and offset == -1:
        offset = min(len(imgs_name), apex + (apex - onset))
    elif onset > 0 and apex == -1 and offset > 0:
        apex = (onset + offset) // 2
    else:
        raise Exception
    try:
        assert onset < apex and apex < offset
    except:
        print('[Error][{}] onset: {}, apex: {}, offset: {}, '.format(
            imgs_dir, onset, apex, offset))
        continue  # skip this row
    if not imgs_dir in anno_dict:
        anno_dict[imgs_dir] = np.zeros(len(imgs_name))
        label_dict[imgs_dir] = np.zeros(len(imgs_name))
        pred_gt[imgs_dir] = []
        bi_label_dict[imgs_dir] = []
    # convert start index from 1 to 0
    onset -= 1
    apex -= 1
    offset -= 1
    # intensity: Gaussian bump centered at the apex frame; the epsilon
    # guards against sigma == 0 for very short intervals
    sigma = min(offset - apex, apex - onset) // 2 + 1e-7
    if sigma <= 0:
        pdb.set_trace()
    mu = apex
    func = lambda x: np.exp(-(x - mu)**2 / 2 / sigma / sigma
                            ) / sigma / np.sqrt(2 * np.pi)
    cumsum = 0
    for i in range(onset, offset + 1):
        anno_dict[imgs_dir][i] += func(i)
        cumsum += anno_dict[imgs_dir][i]
    # print('onset2offset cumsum: {:.2f}'.format(cumsum))
    # label
    label_dict[imgs_dir][onset:offset +
                         1] = 1 if 'macro' in str(row[-2]).lower() else 2
    # pred_gt
    pred_gt[imgs_dir].append(
        [onset, offset + 1, 1 if 'macro' in str(row[-2]).lower() else 2])
    # bi_label
    bi_label_dict[imgs_dir].extend(
        [osp.join(imgs_dir, name) for name in imgs_name[onset:offset + 1]])

np.save(osp.join(params.SAMM_ROOT, 'anno_dict.npy'), anno_dict)
np.save(osp.join(params.SAMM_ROOT, 'label_dict.npy'), label_dict)
np.save(osp.join(params.SAMM_ROOT, 'pred_gt.npy'), pred_gt)
np.save(osp.join(params.SAMM_ROOT, 'bi_label.npy'), bi_label_dict)

# %% visulization
# fig = plt.figure(figsize=(30, 50))
# for i, (k, v) in enumerate(anno_dict.items()):
#     fig.add_subplot((len(anno_dict) - 1) // 5 + 1, 5, i + 1)
#     plt.plot(v)
# fig.tight_layout()
# plt.savefig('./SAMM_annos.pdf')
# plt.show()
column = 5
fig = plt.figure(figsize=(30, ((len(label_dict) - 1) // column + 1) * 2))
for i, (k, v) in enumerate(label_dict.items()):
    v[v > 0] = 1  # 1,2 -> 1
    fig.add_subplot((len(label_dict) - 1) // column + 1, column, i + 1)
    plt.plot(v, 'r-')
    plt.title(osp.basename(k))
fig.tight_layout()
out_dir = './preprocess'
plt.savefig(osp.join(out_dir, 'sa_bi_label.pdf'))
plt.close('all')
# %%
/submit.py
# Build a challenge submission CSV from the saved match-region records
# and print the resulting precision/recall/F-score.
import pandas as pd
import numpy as np
import os.path as osp

dataset = 'CASME_2'
# dataset = 'SAMM'
submit_name = 'submit_{}.csv'.format(dataset)
result_dir_name = 'results'
submit_npy_name = 'match_regions_record_all.npy'
submit_id = 'done_exp_cls_ca_20210708-215035'


def convert_key(k, dataset):
    """Map a clip path to the subject/video key expected by the
    submission format (CASME_2 truncates the basename to 7 chars)."""
    if dataset == 'CASME_2':
        k = osp.basename(k)[:7]
    elif dataset == 'SAMM':
        k = osp.basename(k)
    else:
        raise NotImplementedError
    return k


data = np.load(osp.join('.', result_dir_name, submit_id, 'output',
                        submit_npy_name),
               allow_pickle=True).item()
metric = {'TP': 0, 'FN': 0, 'FP': 0}
with open(submit_name, 'w') as f:
    # first line is the challenge's dataset code
    if dataset == 'CASME_2':
        f.write('2\r\n')
    elif dataset == 'SAMM':
        f.write('1\r\n')
    else:
        raise NotImplementedError
    for k, v in data.items():
        k = convert_key(k, dataset)
        assert isinstance(v[0], list)
        for line in v:
            f.write(','.join([k, *[str(x) for x in line]]) + '\r\n')
            # line[-1] is the match verdict string: 'TP', 'FP' or 'FN'
            metric[line[-1]] += 1

# NOTE(review): raises ZeroDivisionError if there are no TPs at all.
precision = metric['TP'] / (metric['TP'] + metric['FP'])
recall = metric['TP'] / (metric['TP'] + metric['FN'])
f_score = 2 * precision * recall / (precision + recall)
print('TP: {}, FP: {}, FN: {}'.format(metric['TP'], metric['FP'],
                                      metric['FN']))
print('P: {:.4f}, R: {:.4f}, F: {:.4f}'.format(precision, recall, f_score))
/trainer_cls.py
import time
from matplotlib.pyplot import winter
import torch
import torch.nn.functional as F
import numpy as np

import utils
import dataset.utils as dataset_utils
import dataset.params as DATASET_PARAMS


def train(dataloader, model, criterion, optimizer, epoch, logger, args,
          amp_autocast, loss_scaler):
    """One training epoch for the binary (none/expression) classifier.

    Labels 1 and 2 (macro/micro) are merged into class 1 before the
    forward pass. Loss is reduced across processes via
    utils.reduce_loss. Nothing is returned; progress is logged on
    rank 0 every ``args.print_freq`` batches.
    """
    batch_time = utils.AverageMeter()
    data_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    end = time.time()
    model.train()
    for i, data_batch in enumerate(dataloader):
        data_time.update(time.time() - end)
        temp_data, img_features, annos, labels, _ = data_batch
        batch_size = temp_data.shape[0]
        # # TODO: skip all zero samples
        # if (labels == 0).all() and np.random.rand() <= 0.7:
        #     end = time.time()
        #     # print('skip all zeros batch...')
        #     continue
        # keep_ids = []
        # for bi in range(batch_size):
        #     if not ((labels[bi] == 0).all() and np.random.rand() <= 0.5):
        #         keep_ids.append(bi)
        # # print('skip {} samples...'.format(batch_size - len(keep_ids)))
        # batch_size = len(keep_ids)  # m batch_size
        # if batch_size == 0:
        #     end = time.time()
        #     # print('skip all zeros batch...')
        #     continue
        # keep_ids = np.asarray(keep_ids)
        # temp_data = temp_data[keep_ids]
        # img_features = img_features[keep_ids]
        # annos = annos[keep_ids]
        # labels = labels[keep_ids]
        # label preprocess
        labels[labels > 0] = 1  # 1, 2 -> 1
        temp_data = temp_data.cuda()
        img_features = img_features.cuda()
        # annos = annos.cuda()
        labels = labels.cuda()
        # NOTE(review): autocast scope reconstructed as forward+loss;
        # confirm against the original formatting.
        with amp_autocast():
            out = model(temp_data, img_features)
            # flat labels
            out = out.reshape(batch_size * args.length, -1)
            labels = labels.reshape(-1)
            loss = criterion(out, labels)
        # backward + step
        optimizer.zero_grad()
        if loss_scaler is None:
            loss.backward()
            optimizer.step()
        else:
            loss_scaler(loss, optimizer)
        # distirbuted reduce
        utils.reduce_loss(loss, args)
        losses.update(loss.item(), temp_data.size(0))
        batch_time.update(time.time() - end)
        if args.local_rank == 0 and (i % args.print_freq == 0
                                     or i == len(dataloader) - 1):
            output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          epoch,
                          i + 1,
                          len(dataloader),
                          batch_time=batch_time,
                          data_time=data_time,
                          loss=losses,
                          lr=optimizer.param_groups[-1]['lr']))
            logger.info(output)
        torch.cuda.synchronize()
        end = time.time()


def validate(dataloader, model, criterion, logger, args, amp_autocast):
    """Evaluate the classifier on ``dataloader``.

    Returns (avg_loss, pred_and_gt) where pred_and_gt maps each frame
    image path to [predicted_class, ground_truth_class].
    """
    batch_time = utils.AverageMeter()
    losses = utils.AverageMeter()
    model.eval()
    end = time.time()
    # outs = []
    # annos = []
    # labels = []
    # pred_anno_dict = {}  # imgs_dir -> anno values
    # pred_label_dict = {}  # imgs_dir -> labels
    # anno_dict = {}
    # label_dict = {}
    pred_and_gt = {}  # img_p -> [pred, target]
    for i, data_batch in enumerate(dataloader):
        temp_data, img_features, annos, labels, seq_info = data_batch
        # label preprocess
        labels[labels > 0] = 1  # 1, 2 -> 1
        batch_size = labels.shape[0]
        temp_data = temp_data.cuda()
        img_features = img_features.cuda()
        # annos = annos.cuda()
        labels = labels.cuda()
        with torch.no_grad():
            with amp_autocast():
                out = model(temp_data, img_features)
                loss = criterion(out.reshape(batch_size * args.length, -1),
                                 labels.reshape(-1)).float()
        # NaN losses are skipped so they do not poison the running avg
        if not torch.isnan(loss).any():
            # distirbuted reduce
            utils.reduce_loss(loss, args)
            losses.update(loss.item(), temp_data.size(0))
        batch_time.update(time.time() - end)
        if args.local_rank == 0 and (i % args.print_freq == 0
                                     or i == len(dataloader) - 1):
            output = ('Val: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                          i + 1,
                          len(dataloader),
                          batch_time=batch_time,
                          loss=losses))
            logger.info(output)
        torch.cuda.synchronize()
        # record per-frame predictions: seq_info locates each sampled
        # window (img_dir, front, tail) inside its source clip
        img_dirs, fronts, tails = seq_info
        for batch_idx in range(batch_size):
            img_dir = img_dirs[batch_idx]
            front = fronts[batch_idx].item()
            tail = tails[batch_idx].item()
            # assert batch_size == 1, 'batch size should be 1'
            img_dir_ps = dataset_utils.scan_jpg_from_img_dir(img_dir)
            # if not img_dir in pred_label_dict:
            #     pred_anno_dict[img_dir] = np.zeros(len(img_dir_ps))
            #     pred_label_dict[img_dir] = np.zeros(len(img_dir_ps))
            #     anno_dict = [img_dir] = np.zeros(len(img_dir_ps))
            #     label_dict = [img_dir] = np.zeros(len(img_dir_ps))
            pred_label = torch.argmax(out[batch_idx], dim=-1).reshape(-1)
            label = labels[batch_idx].reshape(-1)
            for j in range(front, tail):
                img_p = img_dir_ps[j]
                pred_and_gt[img_p] = [
                    pred_label[j - front].item(), label[j - front].item()
                ]
            # pred_anno_dict[img_dir][front:tail] += pred_annos
            # assert (pred_label_dict[img_dir][front:tail] == 0
            #         ).all(), 'should be no overlap'
            # pred_label_dict[img_dir][front:tail] += pred_labels
            # anno_dict[img_dir][front:tail] += annos
            # label_dict[img_dir][front:tail] += labels
        end = time.time()
    return losses.avg, pred_and_gt
/utils.py
import os
import sys
import cv2
from timm.utils import reduce_tensor
import torch
import shutil
import numpy as np
import os.path as osp
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.distributed as dist
from torch.nn.modules import loss
from datetime import datetime

import paths
import dataset.utils as dataset_utils

# extend_front() recurses once per consecutive positive frame; long clips can
# exceed the interpreter's default recursion limit.
sys.setrecursionlimit(10000)


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record a new observation `val` seen `n` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


class Focal_Loss(torch.nn.Module):
    """Multi-class focal loss.

    Args:
        alpha: optional per-class weight list; when omitted (or empty) every
            class gets weight 1.
        gamma: focusing exponent; larger values down-weight easy examples.
        num_class: number of classes, used only when `alpha` is not supplied.
        epsilon: numerical-stability constant added inside the log.
    """

    def __init__(self, alpha=None, gamma=2, num_class=2, epsilon=1e-7):
        super().__init__()
        # FIX: the original signature used a mutable default (alpha=[]).
        # `None` is the safe sentinel; an explicitly passed empty list still
        # selects uniform weights exactly as before.
        if alpha is None or len(alpha) == 0:
            self.alpha = torch.ones(num_class)
        else:
            self.alpha = torch.tensor(alpha, dtype=torch.float32)
        self.gamma = gamma
        self.epsilon = epsilon

    def forward(self, pred, target):
        """Return the scalar focal loss for logits `pred` (N, C) and labels `target` (N,)."""
        assert len(pred.shape) == 2, 'pred shape should be N, num_class'
        assert len(target.shape) == 1, 'target shape should be N'
        pred = torch.softmax(pred, dim=-1)
        # nll_loss over probabilities picks -p[target]; negate to recover p[target].
        target_pred = -F.nll_loss(pred, target, reduction='none')
        loss = -torch.log(target_pred + self.epsilon)
        class_alpha = torch.tensor([self.alpha[c.item()] for c in target],
                                   dtype=torch.float32).to(loss.device)
        weights = ((1 - target_pred)**self.gamma) * class_alpha
        loss = (weights * loss).mean()
        return loss


class My_loss(torch.nn.Module):
    """Joint loss over a regression channel (out[..., 0]) and class logits (out[..., 1:]).

    Only the scaled L1 regression term is currently active; the commented
    alternatives record earlier experiments (CCC, MSE, cross-entropy, focal).
    """

    def __init__(self):
        super().__init__()
        self.focal_loss = Focal_Loss(num_class=3)

    def forward(self, out, anno_y, label_y):
        anno_x = out[..., 0]
        label_x = out[..., 1:]
        # Flatten batched (B, T) annotations to 1-D before the element-wise loss.
        if len(anno_x.shape) == 2:
            anno_x = anno_x.reshape(-1)
            anno_y = anno_y.reshape(-1)
        # loss_ccc = -ccc(anno_x, anno_y)[0]
        # loss_mse = F.mse_loss(anno_x, anno_y)
        loss_l1 = F.l1_loss(anno_x, anno_y)
        if len(label_x.shape) == 3:
            label_x = label_x.reshape(-1, label_x.shape[-1])
            label_y = label_y.reshape(-1)
        # loss_focal = self.focal_loss(label_x, label_y)
        # The classification term is disabled; only scaled L1 is used.
        loss = loss_l1 * 1000
        return loss


def ccc(y_pred, y_true, epsilon=1e-7):
    """Concordance correlation coefficient between two 1-D tensors.

    Returns:
        (ccc, rho): the CCC value and the Pearson correlation rho.
    """
    assert len(y_pred.shape) == 1
    true_mean = y_true.mean()
    pred_mean = y_pred.mean()
    v_true = y_true - true_mean
    v_pred = y_pred - pred_mean
    rho = (v_pred * v_true).sum() / (torch.sqrt(
        (v_pred**2).sum()) * torch.sqrt((v_true**2).sum()) + epsilon)
    std_predictions = torch.std(y_pred)
    std_gt = torch.std(y_true)
    ccc = 2 * rho * std_gt * std_predictions / (
        (std_predictions**2 + std_gt**2 + (pred_mean - true_mean)**2) +
        epsilon)
    return ccc, rho


def img_dirs_filter(img_dirs, dataset):
    """Drop clip directories that have no (or incorrect) annotation entries.

    some clips are not labeled...
    """
    _img_dirs = []
    if dataset == 'SAMM':
        anno_dict = np.load(osp.join(paths.SAMM_LABEL_DIR, 'anno_dict.npy'),
                            allow_pickle=True).item()
    elif dataset == 'CASME_2':
        anno_dict = np.load(osp.join(paths.CASME_2_LABEL_DIR,
                                     'anno_dict.npy'),
                            allow_pickle=True).item()
    else:
        raise NotImplementedError
    for img_dir in img_dirs:
        if img_dir in anno_dict:
            _img_dirs.append(img_dir)
        else:
            print('clip: {} is not labeled or labeled incorrectly.'.format(
                img_dir))
    return _img_dirs


def get_img_dirs(dataset):
    """Return the labeled clip directories for `dataset` ('SAMM' or 'CASME_2')."""
    if dataset == 'SAMM':
        img_dirs = [
            osp.join(paths.SAMM_VIDEO_DIR, name)
            for name in os.listdir(paths.SAMM_VIDEO_DIR)
        ]
    elif dataset == 'CASME_2':
        # CASME_2 nests clips one level deeper: subject/clip.
        _img_dirs = [[
            osp.join(paths.CASME_2_VIDEO_DIR, name1, name2)
            for name2 in os.listdir(osp.join(paths.CASME_2_VIDEO_DIR, name1))
        ] for name1 in os.listdir(paths.CASME_2_VIDEO_DIR)]
        img_dirs = []
        for dirs in _img_dirs:
            img_dirs.extend(dirs)
    else:
        raise NotImplementedError
    img_dirs = img_dirs_filter(img_dirs, dataset)
    return img_dirs


def leave_one_out(img_dirs, dataset):
    """Build leave-one-subject-out splits.

    Returns:
        dict mapping subject key -> [train_set, val_set] lists of clip dirs.
    """
    img_dirs_dict = {}
    img_dirs = sorted(img_dirs)
    if dataset == 'SAMM':
        keys = []
        for img_dir in img_dirs:
            keys.append(osp.basename(img_dir).split('_')[0])  # 006, 007...
        keys = sorted(list(set(keys)))
        for key in keys:
            train_set = []
            val_set = []
            for img_dir in img_dirs:
                if key in img_dir:
                    val_set.append(img_dir)
                else:
                    train_set.append(img_dir)
            img_dirs_dict[key] = [train_set, val_set]
    elif dataset == 'CASME_2':
        keys = []
        for img_dir in img_dirs:
            keys.append(img_dir.split('/')[-2])  # s15, s16...
        keys = sorted(list(set(keys)))
        for key in keys:
            train_set = []
            val_set = []
            for img_dir in img_dirs:
                if img_dir.split('/')[-2] == key:
                    val_set.append(img_dir)
                else:
                    train_set.append(img_dir)
            img_dirs_dict[key] = [train_set, val_set]
    else:
        raise NotImplementedError
    return img_dirs_dict


def adjust_learning_rate(optimizer, epoch, lr_strat, wd, lr_steps, factor=0.1):
    """Sets the learning rate to the initial LR decayed by 10 every N epochs"""
    # One multiplicative decay step per milestone already passed.
    decay = factor**(sum(epoch >= np.asarray(lr_steps)))
    lr = lr_strat * decay
    decay = wd
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
        param_group['weight_decay'] = decay


def save_checkpoint(state, is_best, save_root, root_model, filename='val'):
    """Save `state` and, when `is_best`, copy it to the *_best_loss checkpoint."""
    torch.save(
        state,
        '%s/%s/%s_checkpoint.pth.tar' % (save_root, root_model, filename))
    if is_best:
        shutil.copyfile(
            '%s/%s/%s_checkpoint.pth.tar' % (save_root, root_model, filename),
            '%s/%s/%s_best_loss.pth.tar' % (save_root, root_model, filename))


def check_rootfolders(args):
    """Create log and model folder"""
    folders_util = [
        args.root_log, args.root_model, args.root_output, args.root_runs
    ]
    folders_util = [
        "%s/" % (args.save_root) + folder for folder in folders_util
    ]
    for folder in folders_util:
        if not os.path.exists(folder):
            print('creating folder ' + folder)
            os.makedirs(folder)


def evaluate(pred_anno_dict,
             pred_label_dict,
             dataset,
             threshold=0.9,
             val_id='all',
             epoch=-1,
             args=None):
    """Spot expression intervals from predicted annotation curves and score them.

    Peaks are grown outward until the accumulated annotation mass reaches
    `threshold`; a prediction counts as correct when its IoU with a ground
    truth interval is >= 0.5. Also saves a per-clip curve figure.

    Returns:
        (ret_info, f_score, (M, N, A)): summary strings, overall F1, and the
        (ground-truth count, prediction count, true-positive count) totals.
    """
    if dataset == 'SAMM':
        pred_gt = np.load(osp.join(paths.SAMM_ROOT, 'pred_gt.npy'),
                          allow_pickle=True).item()
        anno_dict = np.load(osp.join(paths.SAMM_ROOT, 'anno_dict.npy'),
                            allow_pickle=True).item()
        fps = 200
    elif dataset == 'CASME_2':
        pred_gt = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'pred_gt.npy'),
                          allow_pickle=True).item()
        anno_dict = np.load(osp.join(paths.CASME_2_LABEL_DIR,
                                     'anno_dict.npy'),
                            allow_pickle=True).item()
        fps = 30
    else:
        raise NotImplementedError
    result_dict = {}
    for img_dir, pred_annos in pred_anno_dict.items():
        pred_labels = pred_label_dict[img_dir]
        gt_list = pred_gt[img_dir]
        pred_list = []
        # scan all possible peak point (one candidate per second of video)
        for peak_idx in range(0, len(pred_annos), fps):
            is_peak = True
            front = peak_idx
            tail = peak_idx
            cumsum = pred_annos[peak_idx]
            # Grow [front, tail] symmetrically until enough mass is collected
            # or the candidate stops being a local maximum.
            while is_peak and cumsum < threshold and (
                    front > 0 or tail < len(pred_annos) - 1):
                if front - 1 >= 0:
                    front -= 1
                    cumsum += pred_annos[front]
                if tail + 1 < len(pred_annos):
                    tail += 1
                    cumsum += pred_annos[tail]
                is_peak = pred_annos[peak_idx] >= pred_annos[
                    front] and pred_annos[peak_idx] >= pred_annos[tail]
            if is_peak and cumsum >= threshold:
                # TODO: label func
                pred_list.append([front, tail, -1])
        M = len(gt_list)
        N = len(pred_list)
        A = 0
        for [onset, offset, label_gt] in gt_list:
            for [front, tail,
                 _] in pred_list:  # TODO: if one pred could match more than one gt?
                # Order the two intervals so b1 starts first, then compute IoU.
                if front < onset:
                    b1 = [front, tail]
                    b2 = [onset, offset]
                else:
                    b2 = [front, tail]
                    b1 = [onset, offset]
                if b1[1] >= b2[0] and b2[1] >= b1[1]:
                    overlap = b1[1] - b2[0] + 1
                    union = b2[1] - b1[0] + 1
                elif b1[1] >= b2[1]:
                    # b2 fully contained in b1
                    overlap = b2[1] - b2[0] + 1
                    union = b1[1] - b1[0] + 1
                else:  # no overlap
                    overlap = 0
                    union = 1
                if overlap / union >= 0.5:
                    A += 1
                    break
        result_dict[img_dir] = [M, N, A]
    ret_info = []
    M = 0
    N = 0
    A = 0
    for key, (m, n, a) in result_dict.items():
        M += m
        N += n
        A += a
    if M == 0 or N == 0 or A == 0:
        # Sentinel values when no metric can be computed.
        precision = -1.0
        recall = -1.0
        f_score = -1.0
    else:
        precision = A / N
        recall = A / M
        f_score = 2 * recall * precision / (recall + precision)
    ret_info.append('[over all] P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(
        precision, recall, f_score))
    # save fig: predicted (blue) vs ground-truth (red) annotation curves
    column = 3
    fig = plt.figure(figsize=(10,
                              ((len(pred_anno_dict) - 1) // column + 1) * 2))
    for i, (img_dir, pred_annos) in enumerate(pred_anno_dict.items()):
        fig.add_subplot((len(pred_anno_dict) - 1) // column + 1, column,
                        i + 1)
        plt.plot(pred_annos, 'b-', alpha=0.5)
        plt.plot(anno_dict[img_dir], 'r-', alpha=0.5)
    fig.tight_layout()
    plt.savefig(
        osp.join(args.save_root, args.root_output,
                 '{}_anno_{}.pdf'.format(val_id, epoch)))
    plt.close('all')
    return ret_info, f_score, (M, N, A)


def evaluate_bi_labels(pred_and_gt, val_id, epoch, args):
    """Evaluate per-frame binary predictions grouped by clip directory.

    Args:
        pred_and_gt: dict img_path -> [pred, target] per-frame pairs.
        val_id: held-out subject id, used for the output sub-directory.
        epoch: current epoch, used in the figure filename.
        args: run configuration (needs local_rank, save_root, root_output, ...).

    Returns:
        (precision, recall, f_score, MNA, match_regions_record).
    """
    keys = sorted(list(pred_and_gt.keys()))
    imgs_dirs = sorted(list(set([osp.dirname(img_p) for img_p in keys])))
    result_dict = {}
    for imgs_dir in imgs_dirs:
        result_dict[imgs_dir] = []
        img_ps = dataset_utils.scan_jpg_from_img_dir(imgs_dir)
        for img_p in img_ps:
            # Frames never predicted default to [0, 0].
            result_dict[imgs_dir].append(pred_and_gt.get(
                img_p, [0, 0]))  # [pred, target]
        result_dict[imgs_dir] = np.asarray(result_dict[imgs_dir])
    precision, recall, f_score, MNA, result_dict, match_regions_record = evaluate_pred_and_gt(
        result_dict, args)
    # visulization (rank 0 only)
    if args.local_rank == 0:
        column = 3
        fig = plt.figure(figsize=(10,
                                  ((len(imgs_dirs) - 1) // column + 1) * 2))
        for i, imgs_dir in enumerate(imgs_dirs):
            fig.add_subplot((len(imgs_dirs) - 1) // column + 1, column, i + 1)
            data = result_dict[imgs_dir]
            pred = data[:, 0]
            target = data[:, 1]
            plt.plot(pred, 'b-', alpha=0.5)
            plt.plot(target, 'r-', alpha=0.5)  # gt
            plt.title(osp.basename(imgs_dir))
        fig.tight_layout()
        out_dir = osp.join(args.save_root, args.root_output, val_id)
        os.makedirs(out_dir, exist_ok=True)
        plt.savefig(osp.join(out_dir, 'bi_label_{}.pdf'.format(epoch)))
        plt.close('all')
    return precision, recall, f_score, MNA, match_regions_record


def extend_front(front, pred, patience):
    """Return the last index of the positive run starting at `front`.

    Gaps of up to `patience` non-positive frames are bridged: the run jumps
    to the farthest positive frame within `patience` steps ahead.
    """
    assert pred[front] > 0
    d = patience
    while d > 0:
        if front + d < len(pred) and pred[front + d] > 0:
            return extend_front(front + d, pred, patience)
        d -= 1
    return front


def evaluate_pred_and_gt(result_dict, args):
    """Turn per-frame binary predictions into regions and score them vs ground truth.

    Args:
        result_dict: dict imgs_dir -> (T, 2) array of [pred, target] per frame.
        args: needs `dataset` and `patience`.

    Returns:
        (precision, recall, f_score, (M, N, A), result_dict,
        match_regions_record) where result_dict has its prediction column
        replaced by the binarized found regions.
    """
    if args.dataset == 'SAMM':
        pred_gt = np.load(osp.join(paths.SAMM_ROOT, 'pred_gt.npy'),
                          allow_pickle=True).item()
    elif args.dataset == 'CASME_2':
        pred_gt = np.load(osp.join(paths.CASME_2_LABEL_DIR, 'pred_gt.npy'),
                          allow_pickle=True).item()
    else:
        raise NotImplementedError
    M = 0
    N = 0
    A = 0
    match_regions_record = {}
    for imgs_dir, data in result_dict.items():
        pred = data[:, 0]
        target = data[:, 1]
        found_regions = []
        match_regions = [
        ]  # gt_onset, gt_offset, pred_onset, pred_offset, TP/FP
        front = 0
        while front < len(pred):
            tail = front
            if pred[front] > 0:
                tail = extend_front(front, pred, args.patience)
                if front < tail:
                    # find one region
                    found_regions.append([front, tail])
            # NOTE(review): this also advances by `patience` when pred[front]
            # is 0, skipping frames between regions — confirm this is intended.
            front = tail + args.patience
        # modify result_dict: rewrite the prediction column as region masks
        pred = np.zeros_like(pred)
        for front, tail in found_regions:
            pred[front:tail] = 1
        data[:, 0] = pred
        result_dict[imgs_dir] = data
        # eval precision, recall, f_score
        gt_list = pred_gt[imgs_dir]
        m = len(gt_list)
        n = len(found_regions)
        a = 0
        # TODO: determine whether one predicted region is macro or micro-expression
        gt_regions_mark = np.zeros(m)
        found_regions_mark = np.zeros(n)
        for mg, [onset, offset, label_gt] in enumerate(
                gt_list):  # label_gt: 1->macro, 2->micro
            for mf, [front, tail] in enumerate(
                    found_regions
            ):  # TODO: if one found region can match more than one gt region
                # Order the two intervals so b1 starts first, then compute IoU.
                if front < onset:
                    b1 = [front, tail]
                    b2 = [onset, offset]
                else:
                    b1 = [onset, offset]
                    b2 = [front, tail]
                if b1[1] >= b2[0] and b2[1] >= b1[1]:
                    overlap = b1[1] - b2[0] + 1
                    union = b2[1] - b1[0] + 1
                elif b1[1] >= b2[1]:
                    # b2 fully contained in b1
                    overlap = b2[1] - b2[0] + 1
                    union = b1[1] - b1[0] + 1
                else:  # no overlap
                    overlap = 0
                    union = 1
                if overlap / union >= 0.5:
                    a += 1
                    found_regions_mark[mf] = 1
                    gt_regions_mark[mg] = 1
                    match_regions.append([onset, offset, front, tail, 'TP'])
                    break
        for mg in range(m):
            if gt_regions_mark[mg] == 0:
                onset, offset, _ = gt_list[mg]
                match_regions.append([onset, offset, '-', '-', 'FN'])
        for mf in range(n):
            if found_regions_mark[mf] == 0:
                front, tail = found_regions[mf]
                match_regions.append(['-', '-', front, tail, 'FP'])
        match_regions_record[imgs_dir] = match_regions
        M += m
        N += n
        A += a
    # NOTE: if one found region can match more than one gt region, TP+FP may be greater than n
    # result of the participant
    if A == 0 or N == 0:
        precision = -1.0
        recall = -1.0
        f_score = -1.0
    else:
        precision = A / N
        recall = A / M
        f_score = 2 * precision * recall / (precision + recall)
    return precision, recall, f_score, (M, N,
                                        A), result_dict, match_regions_record


def calculate_metric_from_dict_MNA(MNA_all):
    """Aggregate per-subject (M, N, A) counts into overall P/R/F1.

    Returns (-1.0, -1.0, -1.0) when any denominator is zero.
    """
    M = 0
    N = 0
    A = 0
    for k, mna in MNA_all.items():
        m, n, a = mna
        M += m
        N += n
        A += a
    try:
        precision = A / N
        recall = A / M
        f_score = 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        # FIX: was a bare `except:`; only division by zero is expected here.
        precision = -1.0
        recall = -1.0
        f_score = -1.0
    return precision, recall, f_score


def synchronize():
    """Barrier across all distributed workers; no-op outside distributed mode."""
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    world_size = dist.get_world_size()
    if world_size == 1:
        return
    dist.barrier()


def reduce_loss(loss, args):
    """Average `loss` across workers when running distributed; otherwise pass through."""
    if args.distributed:
        loss = reduce_tensor(loss.data, float(args.world_size))
    return loss


def synchronize_pred_and_gt(pred_and_gt, epoch, args, remove=True):
    """Merge each worker's pred_and_gt dict into one shared dict via temp files.

    Every rank saves its dict; rank 0 merges them and saves the result, which
    the other ranks then load. When `remove` is set rank 0 deletes the temp
    directory afterwards. Returns the merged dict (unchanged when not
    distributed).
    """
    if args.distributed:
        out_dir = osp.join(args.save_root, args.root_runs,
                           'temp_{}'.format(epoch))
        if args.local_rank == 0:
            os.makedirs(out_dir, exist_ok=True)
        synchronize()  # make dir done
        np.save(
            osp.join(out_dir,
                     'temp_pred_and_gt_{}.npy'.format(args.local_rank)),
            pred_and_gt)
        synchronize()  # save done
        if args.local_rank == 0:
            pred_and_gt = {}
            for name in os.listdir(out_dir):
                data = np.load(osp.join(out_dir, name),
                               allow_pickle=True).item()
                pred_and_gt.update(data)
            np.save(osp.join(out_dir, 'temp_pred_and_gt_merge.npy'),
                    pred_and_gt)
            synchronize()  # merge done
        else:
            synchronize()  # start read
            pred_and_gt = np.load(osp.join(out_dir,
                                           'temp_pred_and_gt_merge.npy'),
                                  allow_pickle=True).item()
        synchronize()  # read done
        if remove and args.local_rank == 0:
            shutil.rmtree(out_dir)
    return pred_and_gt


def synchronize_f_score(f_score, args):
    """Broadcast rank 0's f_score to every worker; identity when not distributed."""
    assert isinstance(f_score, float)
    if args.distributed:
        f_score = torch.tensor(f_score).cuda()
        assert f_score.dtype == torch.float32
        synchronize()  # wait tensor allocation
        dist.broadcast(f_score, src=0)
        f_score = f_score.item()
    return f_score


def synchronize_list(list_obj, args):
    """Broadcast rank 0's int list/tuple to every worker; identity when not distributed."""
    assert isinstance(list_obj, (list, tuple))
    if args.distributed:
        list_obj = torch.tensor(list_obj, dtype=torch.int32).cuda()
        synchronize()  # wait tensor allocation
        dist.broadcast(list_obj, src=0)
        list_obj = list_obj.cpu().numpy().tolist()
    return list_obj


def delete_records(total_MNA, match_regions_record_all, val_id):
    """Remove subject `val_id` from the accumulated MNA and match-region records.

    A record key matches when its parent directory (CASME_2 layout) or the
    prefix of its basename before '_' (SAMM layout) equals `val_id`.
    """
    keys2 = list(match_regions_record_all.keys())
    rm_key = val_id
    del total_MNA[rm_key]
    for k in keys2:
        if k.split('/')[-2] == rm_key or osp.basename(k).split(
                '_')[0] == rm_key:
            del match_regions_record_all[k]
    return total_MNA, match_regions_record_all
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
liuhongbo830117/ntire2018_adv_rgb2hs
refs/heads/master
{"/data/icvl_dataset.py": ["/util/spectral_color.py"]}
└── ├── data │ ├── aligned_dataset.py │ └── icvl_dataset.py ├── eval │ ├── evaluation.py │ └── select_model.py ├── models │ └── mylosses.py └── util └── spectral_color.py
/data/aligned_dataset.py
import os.path import random import torchvision.transforms as transforms import torch from data.base_dataset import BaseDataset from data.image_folder import make_dataset from PIL import Image
/data/icvl_dataset.py
import os.path import random import torchvision.transforms as transforms import torch # import torch.nn.functional as F from data.base_dataset import BaseDataset from data.image_folder import make_dataset_from_dir_list from PIL import Image, ImageOps import h5py import numpy as np import spectral from tqdm import tqdm from joblib import Parallel, delayed from util.spectral_color import dim_ordering_tf2th, dim_ordering_th2tf class IcvlNtire2018Dataset(BaseDataset): def initialize(self, opt): self.opt = opt self.challenge = opt.challenge # 'Clean' or 'RealWorld' self.root = opt.dataroot # e.g. icvl_ntire2018 assert (opt.phase in ['train', 'Validate', 'Test']) self.dirlist_rgb = [os.path.join(self.root, 'NTIRE2018_Train1_' + self.challenge), os.path.join(self.root, 'NTIRE2018_Train2_' + self.challenge)] if opt.phase == 'train' else [os.path.join(self.root, 'NTIRE2018_' + opt.phase + '_' + self.challenge)] # A self.dirlist_hs = [os.path.join(self.root, 'NTIRE2018_Train1_Spectral'), os.path.join(self.root, 'NTIRE2018_Train2_Spectral')] if opt.phase == 'train' else [os.path.join(self.root, 'NTIRE2018_' + opt.phase + '_Spectral')] # B self.paths_rgb = sorted(make_dataset_from_dir_list(self.dirlist_rgb)) self.paths_hs = sorted(make_dataset_from_dir_list(self.dirlist_hs)) # self.dir_AB = os.path.join(opt.dataroot, opt.phase) # self.AB_paths = sorted(make_dataset(self.dir_AB)) # print('RETURN TO FULL SIZE PATHS_hs and RGB') #fixme # self.paths_rgb = self.paths_rgb[:5] # self.paths_hs = self.paths_hs[:5] # to handle envi files, so that we can do partial loads self.use_envi = opt.use_envi if self.use_envi: # update self.dirlist_hs self.dirlist_hs_mat = self.dirlist_hs self.dirlist_hs = [os.path.join(self.root, 'NTIRE2018_Train_Spectral_envi')] print(spectral.io.envi.get_supported_dtypes()) if opt.generate_envi_files: self.generate_envi_files(overwrite_envi=opt.overwrite_envi) # update self.paths_hs with the hdr files self.paths_hs = 
sorted(make_dataset_from_dir_list(self.dirlist_hs)) # for dir_hs in self.dirlist_hs: # if not os.path.exists(dir_hs): assert(opt.resize_or_crop == 'resize_and_crop') def __getitem__(self, index): # AB_path = self.AB_paths[index] # AB = Image.open(AB_path).convert('RGB') # AB = AB.resize((self.opt.loadSize * 2, self.opt.loadSize), Image.BICUBIC) # AB = transforms.ToTensor()(AB) # load rgb image path_rgb = self.paths_rgb[index] rgb = Image.open(path_rgb)#.convert('RGB') # fixme set it between 0,1? # rgb = transforms.ToTensor()(rgb) # rgb.shape: torch.Size([3, 1392, 1300]) # sample crop locations # w = rgb.shape[2] # over the tensor already # h = rgb.shape[1] # over the tensor already w = rgb.width #store them in self so as to accesswhile testing for cropping final result h = rgb.height w_offset = random.randint(0, max(0, w - self.opt.fineSize - 1)) h_offset = random.randint(0, max(0, h - self.opt.fineSize - 1)) # actually crop rgb image if self.opt.phase.lower() == 'train': if self.opt.challenge.lower() == 'realworld': # print('realworld<----------------------------------jitter') rgb = transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.01)(rgb) rgb = transforms.ToTensor()(rgb) # rgb.shape: torch.Size([3, 1392, 1300]) # train on random crops rgb_crop = rgb[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize] # rgb_crop is created as a tensor already else: topdown_pad = (1536 - h) // 2 leftright_pad = (1536 - w) // 2 full_img_padding = (leftright_pad, topdown_pad, leftright_pad, topdown_pad) rgb_crop = ImageOps.expand(rgb, full_img_padding) rgb_crop = transforms.ToTensor()(rgb_crop) ## load hs image if self.opt.phase == 'train': path_hs = self.paths_hs[index] if self.use_envi: hs = spectral.io.envi.open(path_hs) # https://github.com/spectralpython/spectral/blob/master/spectral/io/envi.py#L282 not loaded yet until read_subregion # hs.shape: Out[3]: (1392, 1300, 31) (nrows, ncols, nbands) # check dimensions and crop hs 
image (actually read only that one # print(rgb.shape) # print(hs.shape) assert (rgb.shape[1] == hs.shape[0] and rgb.shape[2] == hs.shape[1]) hs_crop = (hs.read_subregion(row_bounds=(h_offset, h_offset + self.opt.fineSize), col_bounds=(w_offset, w_offset + self.opt.fineSize))).astype(float) # hs_crop.shape = (h,w,c)=(256,256,31) here hs_crop = hs_crop / 4095. * 255 # 4096: db max. totensor expects in [0, 255] hs_crop = transforms.ToTensor()(hs_crop) # convert ndarray (h,w,c) [0,255]-> torch tensor (c,h,w) [0.0, 1.0] #move to GPU only the 256,256 crop!good! else: mat = h5py.File(path_hs) # b[{'rgb', 'bands', 'rad'}] # Shape: (Bands, Cols, Rows) <-> (bands, samples, lines) hs = mat['rad'].value # ndarray (c,w,h) hs = np.transpose(hs) # reverse axis order. ndarray (h,w,c). totensor expects this shape hs = hs / 4095. * 255 #4096: db max. totensor expects in [0, 255] hs = transforms.ToTensor()(hs) # convert ndarray (h,w,c) [0,255] -> torch tensor (c,h,w) [0.0, 1.0] #fixme why move everything and not only the crop to the gpu? # check dimensions and crop hs image # assert(rgb.shape[1] == hs.shape[1] and rgb.shape[2] == hs.shape[2]) if self.opt.phase == 'train': # train on random crops hs_crop = hs[:, h_offset:h_offset + self.opt.fineSize, w_offset:w_offset + self.opt.fineSize] else: # Validate or Test hs_crop = hs #will pad on the net # topdown_pad = (1536 - 1392) // 2 # leftright_pad = (1536 - 1300) // 2 # hs_crop = F.pad(hs, (leftright_pad, leftright_pad, topdown_pad, topdown_pad)) rgb_crop = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(rgb_crop) #fixme still valid in icvl? 
if self.opt.phase == 'train': hs_crop = transforms.Normalize(tuple([0.5] * 31), tuple([0.5] * 31))(hs_crop) if self.opt.which_direction == 'BtoA': input_nc = self.opt.output_nc output_nc = self.opt.input_nc else: input_nc = self.opt.input_nc output_nc = self.opt.output_nc if (not self.opt.no_flip) and random.random() < 0.5: idx = [i for i in range(rgb_crop.size(2) - 1, -1, -1)] idx = torch.LongTensor(idx) rgb_crop = rgb_crop.index_select(2, idx) if self.opt.phase == 'train': hs_crop = hs_crop.index_select(2, idx) if input_nc == 1: # RGB to gray tmp = rgb_crop[0, ...] * 0.299 + rgb_crop[1, ...] * 0.587 + rgb_crop[2, ...] * 0.114 rgb_crop = tmp.unsqueeze(0) if self.opt.phase == 'train': if output_nc == 1: # RGB to gray tmp = hs_crop[0, ...] * 0.299 + hs_crop[1, ...] * 0.587 + hs_crop[2, ...] * 0.114 hs_crop = tmp.unsqueeze(0) if self.opt.phase == 'train': return_dict = {'A': rgb_crop, 'B': hs_crop, 'A_paths': path_rgb, 'B_paths': path_hs} else: # we just use the rgb paths instead, won't use them anyway. 
nasty, I know return_dict = {'A': rgb_crop, 'B': rgb_crop, 'A_paths': path_rgb, 'B_paths': path_rgb} if self.opt.phase == 'Validate' or self.opt.phase == 'Test': return_dict['full_img_padding'] = full_img_padding return return_dict def generate_single_envi_file(self, fpath_hs_mat, overwrite_envi=False): dir_hs = self.dirlist_hs[0] # for brevity hsmat = h5py.File(fpath_hs_mat) # b[{'rgb', 'bands', 'rad'}] # Shape: (Bands, Cols, Rows) <-> (bands, samples, lines) hsnp = hsmat['rad'].value # hs image numpy array # ndarray (c,w,h)spec # hdr = io.envi.read_envi_header(file='data/envi_template.hdr') # hdr = self.update_hs_metadata(metadata=hdr, wl=hsmat['bands'].value.flatten()) hdr_file = os.path.join(dir_hs, os.path.splitext(os.path.basename(fpath_hs_mat))[0] + '.hdr') spectral.io.envi.save_image(hdr_file=hdr_file, image=np.transpose(hsnp).astype(np.int16), force=overwrite_envi, dtype=np.int16) # dtype int16 range: [-32000, 32000] def generate_envi_files(self, overwrite_envi=False): if not os.path.exists(self.dirlist_hs[0]): os.makedirs(self.dirlist_hs[0]) nb_free_cores=1 Parallel(n_jobs=-1 - nb_free_cores)( delayed(self.generate_single_envi_file)(fpath_hs_mat=fpath_hs_mat, overwrite_envi=overwrite_envi) for fpath_hs_mat in tqdm(self.paths_hs)) def create_base_hdr(self): hdr=[] """ http://www.harrisgeospatial.com/docs/ENVIHeaderFiles.html#Example data_Type: The type of data representation: 1 = Byte: 8-bit unsigned integer 2 = Integer: 16-bit signed integer 3 = Long: 32-bit signed integer 4 = Floating-point: 32-bit single-precision 5 = Double-precision: 64-bit double-precision floating-point 6 = Complex: Real-imaginary pair of single-precision floating-point 9 = Double-precision complex: Real-imaginary pair of double precision floating-point 12 = Unsigned integer: 16-bit 13 = Unsigned long integer: 32-bit 14 = 64-bit long integer (signed) 15 = 64-bit unsigned long integer (unsigned)""" return hdr def update_hs_metadata(self, metadata, wl): metadata['interleave'] = 'bsq' 
# (Rows, Cols, Bands) <->(lines, samples, bands) # metadata['lines'] = int(metadata['lines']) - 4 # lines = rows. Lines <= 1300 # metadata['samples'] = 1392 # samples = cols. Samples are 1392 for the whole dataset # metadata['bands'] = len(wl) metadata['data type'] = 4 #5 = Double-precision: 64-bit double-precision floating-point http://www.harrisgeospatial.com/docs/ENVIHeaderFiles.html#Example metadata['wavelength'] = wl metadata['default bands'] = [5, 15, 25] metadata['fwhm'] = np.diff(wl) metadata['vroi'] = [1, len(wl)] return metadata def __len__(self): return len(self.paths_rgb) def name(self): return 'icvl_ntire2018_dataset'
/eval/evaluation.py
# Evaluation script for the NTIRE 2018 Spectral Reconstruction Challenge # # * Provide input and output directories as arguments # * Validation files should be found in the '/ref' subdirectory of the input dir # * Input validation files are expected in the v7.3 .mat format import h5py as h5py import numpy as np import sys import os MRAEs = {} RMSEs = {} def get_ref_from_file(filename): matfile = h5py.File(filename, 'r') mat={} for k, v in matfile.items(): mat[k] = np.array(v) return mat['rad'] #input and output directories given as arguments [_, input_dir, output_dir] = sys.argv validation_files = os.listdir(input_dir +'/ref') for f in validation_files: # Read ground truth data if not(os.path.splitext(f)[1] in '.mat'): print('skipping '+f) continue gt = get_ref_from_file(input_dir + '/ref/' + f) # Read user submission rc = get_ref_from_file(input_dir + '/res/' + f) # compute MRAE diff = gt-rc abs_diff = np.abs(diff) relative_abs_diff = np.divide(abs_diff,gt+np.finfo(float).eps) # added epsilon to avoid division by zero. MRAEs[f] = np.mean(relative_abs_diff) # compute RMSE square_diff = np.power(diff,2) RMSEs[f] = np.sqrt(np.mean(square_diff)) print(f) print(MRAEs[f]) print(RMSEs[f]) MRAE = np.mean(MRAEs.values()) print("MRAE:\n"+MRAE.astype(str)) RMSE = np.mean(RMSEs.values()) print("\nRMSE:\n"+RMSE.astype(str)) with open(output_dir + '/scores.txt', 'w') as output_file: output_file.write("MRAE:"+MRAE.astype(str)) output_file.write("\nRMSE:"+RMSE.astype(str))
/eval/select_model.py
# -*- coding: utf-8 -*- import pandas as pd import os import sacred import glob from sacred import Experiment ex = Experiment('rename_to_samename') @ex.config def config(): results_home_dir = os.path.abspath('/home/aitor/dev/adv_rgb2hs_pytorch/results') @ex.automain def select_model(results_home_dir): res_dir_list = glob.glob(results_home_dir + '/*') dfall_list = [] for res_dir in res_dir_list: exp = os.path.basename(res_dir) fpath = os.path.join(res_dir, 'scores.txt') try: f = open(fpath) except IOError: print(fpath + ' does not exist') else: with f: content = f.readlines() content = [x.strip() for x in content] results = dict([elem.split(':') for elem in content]) results = {k: [v] for k, v in results.items()} # from_dict() needs iterable as value per key/column name results['exp'] = [exp] dfbuff = pd.DataFrame.from_dict(results) dfbuff = dfbuff.set_index('exp') dfall_list.append(dfbuff) dfall = pd.concat(dfall_list) dfall = dfall.astype(float) print(dfall.sort_values(by='RMSE', ascending=True)) print(dfall.sort_values(by='MRAE', ascending=True)) pass
/models/mylosses.py
# -*- coding: utf-8 -*- import numpy as np from torch.nn.modules import loss from torch.nn import functional as F import torch from torch.autograd import Variable class RelMAELoss(loss._Loss): r"""Creates a criterion that measures the mean squared error between `n` elements in the input `x` and target `y`. The loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = \left( x_n - y_n \right)^2, where :math:`N` is the batch size. If reduce is ``True``, then: .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if}\; \text{size_average} = \text{True},\\ \operatorname{sum}(L), & \text{if}\; \text{size_average} = \text{False}. \end{cases} `x` and `y` arbitrary shapes with a total of `n` elements each. The sum operation still operates over all the elements, and divides by `n`. The division by `n` can be avoided if one sets the internal variable `size_average` to ``False``. To get a batch of losses, a loss per batch element, set `reduce` to ``False``. These losses are not averaged and are not affected by `size_average`. Args: size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field size_average is set to ``False``, the losses are instead summed for each minibatch. Only applies when reduce is ``True``. Default: ``True`` reduce (bool, optional): By default, the losses are averaged over observations for each minibatch, or summed, depending on size_average. When reduce is ``False``, returns a loss per batch element instead and ignores size_average. 
Default: ``True`` Shape: - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - Target: :math:`(N, *)`, same shape as the input Examples:: >>> loss = nn.MSELoss() >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True) >>> target = autograd.Variable(torch.randn(3, 5)) >>> output = loss(input, target) >>> output.backward() """ def __init__(self, size_average=True, reduce=True): super(RelMAELoss, self).__init__(size_average) self.reduce = reduce def forward(self, input, target): input = (input + 1) / 2.0 * 4095.0 target = (target + 1) / 2.0 * 4095.0 loss._assert_no_grad(target) abs_diff = torch.abs(target - input) relative_abs_diff = abs_diff / (target + np.finfo(float).eps) rel_mae = torch.mean(relative_abs_diff) #from eval: # compute MRAE # diff = gt - rc # abs_diff = np.abs(diff) # relative_abs_diff = np.divide(abs_diff, gt + np.finfo(float).eps) # added epsilon to avoid division by zero. # MRAEs[f] = np.mean(relative_abs_diff) return rel_mae class ZeroGanLoss(loss._Loss): r"""Creates a criterion that measures the mean squared error between `n` elements in the input `x` and target `y`. The loss can be described as: .. math:: \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad l_n = \left( x_n - y_n \right)^2, where :math:`N` is the batch size. If reduce is ``True``, then: .. math:: \ell(x, y) = \begin{cases} \operatorname{mean}(L), & \text{if}\; \text{size_average} = \text{True},\\ \operatorname{sum}(L), & \text{if}\; \text{size_average} = \text{False}. \end{cases} `x` and `y` arbitrary shapes with a total of `n` elements each. The sum operation still operates over all the elements, and divides by `n`. The division by `n` can be avoided if one sets the internal variable `size_average` to ``False``. To get a batch of losses, a loss per batch element, set `reduce` to ``False``. These losses are not averaged and are not affected by `size_average`. 
Args: size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field size_average is set to ``False``, the losses are instead summed for each minibatch. Only applies when reduce is ``True``. Default: ``True`` reduce (bool, optional): By default, the losses are averaged over observations for each minibatch, or summed, depending on size_average. When reduce is ``False``, returns a loss per batch element instead and ignores size_average. Default: ``True`` Shape: - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - Target: :math:`(N, *)`, same shape as the input Examples:: >>> loss = nn.MSELoss() >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True) >>> target = autograd.Variable(torch.randn(3, 5)) >>> output = loss(input, target) >>> output.backward() """ def __init__(self, size_average=True, reduce=True): super(ZeroGanLoss, self).__init__(size_average) self.reduce = reduce def forward(self, input, target): # zero = Variable(torch.Tensor([0]).double()) zeros = input * 0. return torch.sum(zeros)
/util/spectral_color.py
# -*- coding: utf-8 -*- import os import numpy as np from colour.plotting import * import colour import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from skimage.color import colorconv from spectral import * ### to avoid importing pyresources.assemple data def dim_ordering_tf2th(img_list_ndarray): """ convert ndarray with dimensions ordered as tf to th 'tf' expects (nb_imgs, nb_rows, nb_cols, nb_channels) < -- compatible with plt.imshow(img_list[0,:,:,:]) 'th' expects (nb_imgs, nb_channels, nb_rows, nb_cols) Parameters ---------- img_list_ndarray: ndarray Input ndarray of dimensions coherent with 'tf': (nb_imgs, nb_rows, nb_cols, nb_channels) Returns ------- img_ndarray: ndarray Output ndarray of dimensions coherent with 'th': (nb_imgs, nb_channels, nb_rows, nb_cols) """ if len(img_list_ndarray.shape) == 4: img_list_ndarray = np.rollaxis(img_list_ndarray, 3, 1) elif len(img_list_ndarray.shape) == 3: # single image img_list_ndarray = np.rollaxis(img_list_ndarray, 2, 0) else: raise NotImplementedError('Input must be 3 or 4 dimnesional ndarray') return img_list_ndarray def dim_ordering_th2tf(img_list_ndarray): """ convert ndarray with dimensions ordered as th to tf 'tf' expects (nb_imgs, nb_rows, nb_cols, nb_channels) < -- compatible with plt.imshow(img_list[0,:,:,:]) 'th' expects (nb_imgs, nb_channels, nb_rows, nb_cols) Parameters ---------- img_list_ndarray: ndarray Input ndarray of dimensions coherent with 'th': (nb_imgs, nb_channels, nb_rows, nb_cols) Returns ------- img_ndarray: ndarray Output ndarray of dimensions coherent with 'tf': (nb_imgs, nb_rows, nb_cols, nb_channels) """ if len(img_list_ndarray.shape) == 4: img_list_ndarray = np.rollaxis(img_list_ndarray, 1, 4) elif len(img_list_ndarray.shape) == 3: # single image img_list_ndarray = np.rollaxis(img_list_ndarray, 0, 3) else: raise NotImplementedError('Input must be 3 or 4 dimnesional ndarray') return img_list_ndarray def spectral2XYZ_img_vectorized(cmfs, R): """ Parameters ---------- 
cmfs R: np.ndarray (nb_pixels, 3) in [0., 1.] Returns ------- """ x_bar, y_bar, z_bar = colour.tsplit(cmfs) # tested: OK. x_bar is the double one, the rightmost one (red). z_bar is the leftmost one (blue) plt.close('all') plt.plot(np.array([z_bar, y_bar, x_bar]).transpose()) plt.savefig('cmf_cie1964_10.png') plt.close('all') # illuminant. We assume that the captured R is reflectance with illuminant E (although it really is not, it is reflected radiance with an unknown illuminant, but the result is the same) S = colour.ILLUMINANTS_RELATIVE_SPDS['E'].values[20:81:2] / 100. # Equal-energy radiator (ones) sample_spectra_from_hsimg 300 to xxx with delta=5nm # print S # dw = cmfs.shape.interval dw = 10 k = 100 / (np.sum(y_bar * S) * dw) X_p = R * x_bar * S * dw # R(N,31) * x_bar(31,) * S(31,) * dw(1,) Y_p = R * y_bar * S * dw Z_p = R * z_bar * S * dw XYZ = k * np.sum(np.array([X_p, Y_p, Z_p]), axis=-1) XYZ = np.rollaxis(XYZ, 1, 0) # th2tf() but for 2D input return XYZ def spectral2XYZ_img(hs, cmf_name, image_data_format='channels_last'): """ Convert spectral image input to XYZ (tristimulus values) image Parameters ---------- hs: numpy.ndarray 3 dimensional numpy array containing the spectral information in either (h,w,c) ('channels_last') or (c,h,w) ('channels_first') formats cmf_name: basestring String describing the color matching functions to be used image_data_format: basestring {'channels_last', 'channels_first'}. Default: 'channels_last' Channel dimension ordering of the input spectral image. 
the rgb output will follow the same dim ordering format Returns ------- XYZ: numpy.ndarray 3 dimensional numpy array containing the tristimulus value information in either (h,w,3) ('channels_last') or (3,h,w) ('channels_first') formats """ if image_data_format == 'channels_first': hs = dim_ordering_th2tf(hs) # th2tf (convert to channels_last elif image_data_format == 'channels_last': pass else: raise AttributeError('Wrong image_data_format parameter ' + image_data_format) # flatten h, w, c = hs.shape hs = hs.reshape(-1, c) cmfs = get_cmfs(cmf_name=cmf_name, nm_range=(400., 700.), nm_step=10, split=False) XYZ = spectral2XYZ_img_vectorized(cmfs, hs) # (nb_px, 3) # recover original shape (needed to call to xyz2rgb() XYZ = XYZ.reshape((h, w, 3)) if image_data_format == 'channels_first': # convert back to channels_first XYZ = dim_ordering_tf2th(XYZ) return XYZ def spectral2sRGB_img(spectral, cmf_name, image_data_format='channels_last'): """ Convert spectral image input to rgb image Parameters ---------- spectral: numpy.ndarray 3 dimensional numpy array containing the spectral information in either (h,w,c) ('channels_last') or (c,h,w) ('channels_first') formats cmf_name: basestring String describing the color matching functions to be used image_data_format: basestring {'channels_last', 'channels_first'}. Default: 'channels_last' Channel dimension ordering of the input spectral image. 
the rgb output will follow the same dim ordering format Returns ------- rgb: numpy.ndarray 3 dimensional numpy array containing the spectral information in either (h,w,3) ('channels_last') or (3,h,w) ('channels_first') formats """ XYZ = spectral2XYZ_img(hs=spectral, cmf_name=cmf_name, image_data_format=image_data_format) if image_data_format == 'channels_first': XYZ = dim_ordering_th2tf(XYZ) # th2tf (convert to channels_last elif image_data_format == 'channels_last': pass else: raise AttributeError('Wrong image_data_format parameter ' + image_data_format) #we need to pass in channels_last format to xyz2rgb sRGB = colorconv.xyz2rgb(XYZ/100.) if image_data_format == 'channels_first': # convert back to channels_first sRGB = dim_ordering_tf2th(sRGB) return sRGB def save_hs_as_envi(fpath, hs31, image_data_format_in='channels_last'):#, image_data_format_out='channels_last'): #output is always channels_last if image_data_format_in == 'channels_first': hs31 = dim_ordering_th2tf(hs31) elif image_data_format_in != 'channels_last': raise AttributeError('Wrong image_data_format_in') # dst_dir = os.path.dirname(fpath) hdr_fpath = fpath + '.hdr' wl = np.arange(400, 701, 10) hs31_envi = envi.create_image(hdr_file=hdr_fpath, metadata=generate_metadata(wl=wl), shape=hs31.shape, # Must be in (Rows, Cols, Bands) force=True, dtype=np.float32, # np.float32, 32MB/img np.ubyte: 8MB/img ext='.envi31') mm = hs31_envi.open_memmap(writable=True) mm[:, :, :] = hs31 def generate_metadata(wl): md = dict() md['interleave'] = 'bsq' # (Rows, Cols, Bands) <->(lines, samples, bands) md['data type'] = 12 md['wavelength'] = wl md['default bands'] = [22, 15, 6] # for spectral2dummyRGB md['fwhm'] = np.diff(wl) # md['vroi'] = [1, len(wl)] return md def load_envi(fpath_envi, fpath_hdr=None): if fpath_hdr is None: fpath_hdr = os.path.splitext(fpath_envi)[0] + '.hdr' hs = io.envi.open(fpath_hdr, fpath_envi) return hs def get_cmfs(cmf_name='cie1964_10', nm_range=(400., 700.), nm_step=10, split=True): if 
cmf_name == 'cie1931_2': cmf_full_name = 'CIE 1931 2 Degree Standard Observer' elif cmf_name == 'cie1931_10': cmf_full_name = 'CIE 1931 10 Degree Standard Observer' elif cmf_name == 'cie1964_2': cmf_full_name = 'CIE 1964 2 Degree Standard Observer' elif cmf_name == 'cie1964_10': cmf_full_name = 'CIE 1964 10 Degree Standard Observer' else: raise AttributeError('Wrong cmf name') cmfs = colour.STANDARD_OBSERVERS_CMFS[cmf_full_name] # subsample and trim range ix_wl_first = np.where(cmfs.wavelengths == nm_range[0])[0][0] ix_wl_last = np.where(cmfs.wavelengths == nm_range[1] + 1.)[0][0] cmfs = cmfs.values[ix_wl_first:ix_wl_last:int(nm_step), :] # make sure the nm_step is an int if split: x_bar, y_bar, z_bar = colour.tsplit(cmfs) #tested: OK. x_bar is the double one, the rightmost one (red). z_bar is the leftmost one (blue) return x_bar, y_bar, z_bar else: return cmfs
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
eric-z-lin/DIAYN-PyTorch
refs/heads/main
{"/Common/__init__.py": ["/Common/play.py", "/Common/config.py", "/Common/logger.py"], "/main.py": ["/Common/play.py", "/Brain/agent.py", "/Common/config.py", "/Common/logger.py"], "/Brain/__init__.py": ["/Brain/agent.py"], "/Brain/agent.py": ["/Brain/replay_memory.py", "/Brain/model.py"]}
└── ├── Brain │ ├── __init__.py │ ├── agent.py │ ├── model.py │ └── replay_memory.py ├── Common │ ├── __init__.py │ ├── config.py │ ├── logger.py │ └── play.py └── main.py
/Brain/__init__.py
from .agent import SACAgent
/Brain/agent.py
import numpy as np from .model import PolicyNetwork, QvalueNetwork, ValueNetwork, Discriminator import torch from .replay_memory import Memory, Transition from torch import from_numpy from torch.optim.adam import Adam from torch.nn.functional import log_softmax class SACAgent: def __init__(self, p_z, **config): self.config = config self.n_states = self.config["n_states"] self.n_skills = self.config["n_skills"] self.batch_size = self.config["batch_size"] self.p_z = np.tile(p_z, self.batch_size).reshape(self.batch_size, self.n_skills) self.memory = Memory(self.config["mem_size"], self.config["seed"]) self.device = "cuda" if torch.cuda.is_available() else "cpu" torch.manual_seed(self.config["seed"]) self.policy_network = PolicyNetwork(n_states=self.n_states + self.n_skills, n_actions=self.config["n_actions"], action_bounds=self.config["action_bounds"], n_hidden_filters=self.config["n_hiddens"]).to(self.device) self.q_value_network1 = QvalueNetwork(n_states=self.n_states + self.n_skills, n_actions=self.config["n_actions"], n_hidden_filters=self.config["n_hiddens"]).to(self.device) self.q_value_network2 = QvalueNetwork(n_states=self.n_states + self.n_skills, n_actions=self.config["n_actions"], n_hidden_filters=self.config["n_hiddens"]).to(self.device) self.value_network = ValueNetwork(n_states=self.n_states + self.n_skills, n_hidden_filters=self.config["n_hiddens"]).to(self.device) self.value_target_network = ValueNetwork(n_states=self.n_states + self.n_skills, n_hidden_filters=self.config["n_hiddens"]).to(self.device) self.hard_update_target_network() self.discriminator = Discriminator(n_states=self.n_states, n_skills=self.n_skills, n_hidden_filters=self.config["n_hiddens"]).to(self.device) self.mse_loss = torch.nn.MSELoss() self.cross_ent_loss = torch.nn.CrossEntropyLoss() self.value_opt = Adam(self.value_network.parameters(), lr=self.config["lr"]) self.q_value1_opt = Adam(self.q_value_network1.parameters(), lr=self.config["lr"]) self.q_value2_opt = 
Adam(self.q_value_network2.parameters(), lr=self.config["lr"]) self.policy_opt = Adam(self.policy_network.parameters(), lr=self.config["lr"]) self.discriminator_opt = Adam(self.discriminator.parameters(), lr=self.config["lr"]) def choose_action(self, states): states = np.expand_dims(states, axis=0) states = from_numpy(states).float().to(self.device) action, _ = self.policy_network.sample_or_likelihood(states) return action.detach().cpu().numpy()[0] def store(self, state, z, done, action, next_state): state = from_numpy(state).float().to("cpu") z = torch.ByteTensor([z]).to("cpu") done = torch.BoolTensor([done]).to("cpu") action = torch.Tensor([action]).to("cpu") next_state = from_numpy(next_state).float().to("cpu") self.memory.add(state, z, done, action, next_state) def unpack(self, batch): batch = Transition(*zip(*batch)) states = torch.cat(batch.state).view(self.batch_size, self.n_states + self.n_skills).to(self.device) zs = torch.cat(batch.z).view(self.batch_size, 1).long().to(self.device) dones = torch.cat(batch.done).view(self.batch_size, 1).to(self.device) actions = torch.cat(batch.action).view(-1, self.config["n_actions"]).to(self.device) next_states = torch.cat(batch.next_state).view(self.batch_size, self.n_states + self.n_skills).to(self.device) return states, zs, dones, actions, next_states def train(self): if len(self.memory) < self.batch_size: return None else: batch = self.memory.sample(self.batch_size) states, zs, dones, actions, next_states = self.unpack(batch) p_z = from_numpy(self.p_z).to(self.device) # Calculating the value target reparam_actions, log_probs = self.policy_network.sample_or_likelihood(states) q1 = self.q_value_network1(states, reparam_actions) q2 = self.q_value_network2(states, reparam_actions) q = torch.min(q1, q2) target_value = q.detach() - self.config["alpha"] * log_probs.detach() value = self.value_network(states) value_loss = self.mse_loss(value, target_value) logits = self.discriminator(torch.split(next_states, [self.n_states, 
self.n_skills], dim=-1)[0]) p_z = p_z.gather(-1, zs) logq_z_ns = log_softmax(logits, dim=-1) rewards = logq_z_ns.gather(-1, zs).detach() - torch.log(p_z + 1e-6) # Calculating the Q-Value target with torch.no_grad(): target_q = self.config["reward_scale"] * rewards.float() + \ self.config["gamma"] * self.value_target_network(next_states) * (~dones) q1 = self.q_value_network1(states, actions) q2 = self.q_value_network2(states, actions) q1_loss = self.mse_loss(q1, target_q) q2_loss = self.mse_loss(q2, target_q) policy_loss = (self.config["alpha"] * log_probs - q).mean() logits = self.discriminator(torch.split(states, [self.n_states, self.n_skills], dim=-1)[0]) discriminator_loss = self.cross_ent_loss(logits, zs.squeeze(-1)) self.policy_opt.zero_grad() policy_loss.backward() self.policy_opt.step() self.value_opt.zero_grad() value_loss.backward() self.value_opt.step() self.q_value1_opt.zero_grad() q1_loss.backward() self.q_value1_opt.step() self.q_value2_opt.zero_grad() q2_loss.backward() self.q_value2_opt.step() self.discriminator_opt.zero_grad() discriminator_loss.backward() self.discriminator_opt.step() self.soft_update_target_network(self.value_network, self.value_target_network) return -discriminator_loss.item() def soft_update_target_network(self, local_network, target_network): for target_param, local_param in zip(target_network.parameters(), local_network.parameters()): target_param.data.copy_(self.config["tau"] * local_param.data + (1 - self.config["tau"]) * target_param.data) def hard_update_target_network(self): self.value_target_network.load_state_dict(self.value_network.state_dict()) self.value_target_network.eval() def get_rng_states(self): return torch.get_rng_state(), self.memory.get_rng_state() def set_rng_states(self, torch_rng_state, random_rng_state): torch.set_rng_state(torch_rng_state.to("cpu")) self.memory.set_rng_state(random_rng_state) def set_policy_net_to_eval_mode(self): self.policy_network.eval() def set_policy_net_to_cpu_mode(self): 
self.device = torch.device("cpu") self.policy_network.to(self.device)
/Brain/model.py
"""Neural-network building blocks for the DIAYN/SAC agent."""
from abc import ABC
import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions import Normal


def init_weight(layer, initializer="he normal"):
    """Initialize ``layer.weight`` in place ("he normal" or "xavier uniform")."""
    if initializer == "xavier uniform":
        nn.init.xavier_uniform_(layer.weight)
    elif initializer == "he normal":
        nn.init.kaiming_normal_(layer.weight)


class Discriminator(nn.Module, ABC):
    """MLP estimating q(z|s): maps a raw state to one logit per skill."""

    def __init__(self, n_states, n_skills, n_hidden_filters=256):
        super(Discriminator, self).__init__()
        self.n_states = n_states
        self.n_skills = n_skills
        self.n_hidden_filters = n_hidden_filters
        self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.q = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_skills)
        init_weight(self.q, initializer="xavier uniform")
        self.q.bias.data.zero_()

    def forward(self, states):
        """Return unnormalized skill logits for ``states``."""
        x = F.relu(self.hidden1(states))
        x = F.relu(self.hidden2(x))
        logits = self.q(x)
        return logits


class ValueNetwork(nn.Module, ABC):
    """MLP soft state-value function V(s)."""

    def __init__(self, n_states, n_hidden_filters=256):
        super(ValueNetwork, self).__init__()
        self.n_states = n_states
        self.n_hidden_filters = n_hidden_filters
        self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.value = nn.Linear(in_features=self.n_hidden_filters, out_features=1)
        init_weight(self.value, initializer="xavier uniform")
        self.value.bias.data.zero_()

    def forward(self, states):
        """Return a scalar value per state."""
        x = F.relu(self.hidden1(states))
        x = F.relu(self.hidden2(x))
        return self.value(x)


class QvalueNetwork(nn.Module, ABC):
    """MLP action-value function Q(s, a) over concatenated state-action pairs."""

    def __init__(self, n_states, n_actions, n_hidden_filters=256):
        super(QvalueNetwork, self).__init__()
        self.n_states = n_states
        self.n_hidden_filters = n_hidden_filters
        self.n_actions = n_actions
        self.hidden1 = nn.Linear(in_features=self.n_states + self.n_actions, out_features=self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.q_value = nn.Linear(in_features=self.n_hidden_filters, out_features=1)
        init_weight(self.q_value, initializer="xavier uniform")
        self.q_value.bias.data.zero_()

    def forward(self, states, actions):
        """Return a scalar Q-value per (state, action) row."""
        # Concatenate along the feature dimension.
        x = torch.cat([states, actions], dim=1)
        x = F.relu(self.hidden1(x))
        x = F.relu(self.hidden2(x))
        return self.q_value(x)


class PolicyNetwork(nn.Module, ABC):
    """Squashed-Gaussian SAC policy pi(a|s) with tanh action bounding."""

    def __init__(self, n_states, n_actions, action_bounds, n_hidden_filters=256):
        super(PolicyNetwork, self).__init__()
        self.n_states = n_states
        self.n_hidden_filters = n_hidden_filters
        self.n_actions = n_actions
        # action_bounds: [low, high] scalars used to scale/clamp actions.
        self.action_bounds = action_bounds
        self.hidden1 = nn.Linear(in_features=self.n_states, out_features=self.n_hidden_filters)
        init_weight(self.hidden1)
        self.hidden1.bias.data.zero_()
        self.hidden2 = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_hidden_filters)
        init_weight(self.hidden2)
        self.hidden2.bias.data.zero_()
        self.mu = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_actions)
        init_weight(self.mu, initializer="xavier uniform")
        self.mu.bias.data.zero_()
        self.log_std = nn.Linear(in_features=self.n_hidden_filters, out_features=self.n_actions)
        init_weight(self.log_std, initializer="xavier uniform")
        self.log_std.bias.data.zero_()

    def forward(self, states):
        """Return a Normal distribution over pre-squash actions."""
        x = F.relu(self.hidden1(states))
        x = F.relu(self.hidden2(x))
        mu = self.mu(x)
        log_std = self.log_std(x)
        # Clamp log-std to [-20, 2] for numerical stability (SAC convention).
        std = log_std.clamp(min=-20, max=2).exp()
        dist = Normal(mu, std)
        return dist

    def sample_or_likelihood(self, states):
        """Sample a bounded action and its log-likelihood (reparameterized)."""
        dist = self(states)
        # Reparameterization trick
        u = dist.rsample()
        action = torch.tanh(u)
        log_prob = dist.log_prob(value=u)
        # Enforcing action bounds: tanh change-of-variables correction.
        log_prob -= torch.log(1 - action ** 2 + 1e-6)
        log_prob = log_prob.sum(-1, keepdim=True)
        return (action * self.action_bounds[1]).clamp_(self.action_bounds[0], self.action_bounds[1]), log_prob
/Brain/replay_memory.py
"""FIFO replay buffer for off-policy training."""
import random
from collections import namedtuple

# One environment transition; ``z`` is the index of the active DIAYN skill.
Transition = namedtuple('Transition', ('state', 'z', 'done', 'action', 'next_state'))


class Memory:
    """Fixed-capacity FIFO replay memory with uniform random sampling.

    Uses a ring-buffer write index instead of ``list.pop(0)`` so that
    evicting the oldest transition is O(1) rather than O(n) per add.
    """

    def __init__(self, buffer_size, seed):
        self.buffer_size = buffer_size
        self.buffer = []
        self._pos = 0  # next slot to overwrite once the buffer is full
        self.seed = seed
        random.seed(self.seed)

    def add(self, *transition):
        """Store one transition, evicting the oldest when at capacity."""
        if self.buffer_size <= 0:
            # Degenerate capacity: nothing is ever retained (matches the old
            # append-then-pop behavior for a zero-sized buffer).
            return
        item = Transition(*transition)
        if len(self.buffer) < self.buffer_size:
            self.buffer.append(item)
        else:
            # Overwrite the oldest entry in place: O(1) vs list.pop(0)'s O(n).
            self.buffer[self._pos] = item
            self._pos = (self._pos + 1) % self.buffer_size
        assert len(self.buffer) <= self.buffer_size

    def sample(self, size):
        """Return ``size`` distinct transitions drawn uniformly at random."""
        return random.sample(self.buffer, size)

    def __len__(self):
        return len(self.buffer)

    @staticmethod
    def get_rng_state():
        """Return the ``random`` module state (for checkpointing)."""
        return random.getstate()

    @staticmethod
    def set_rng_state(random_rng_state):
        """Restore the ``random`` module state saved by ``get_rng_state``."""
        random.setstate(random_rng_state)
/Common/__init__.py
from .config import get_params from .play import Play from .logger import Logger
/Common/config.py
"""Command-line and paper-default configuration for the DIAYN runner."""
import argparse


def get_params():
    """Collect the run configuration.

    Returns a single dict merging the CLI-tunable options with fixed
    hyper-parameters taken from the DIAYN and SAC papers.
    """
    arg_parser = argparse.ArgumentParser(
        description="Variable parameters based on the configuration of the machine or user's choice")
    arg_parser.add_argument("--env_name", default="BipedalWalker-v3", type=str,
                            help="Name of the environment.")
    arg_parser.add_argument("--interval", default=20, type=int,
                            help="The interval specifies how often different parameters should be saved and printed,"
                                 " counted by episodes.")
    arg_parser.add_argument("--do_train", action="store_true",
                            help="The flag determines whether to train the agent or play with it.")
    arg_parser.add_argument("--train_from_scratch", action="store_false",
                            help="The flag determines whether to train from scratch or continue previous tries.")
    arg_parser.add_argument("--mem_size", default=int(1e+6), type=int,
                            help="The memory size.")
    arg_parser.add_argument("--n_skills", default=50, type=int,
                            help="The number of skills to learn.")
    arg_parser.add_argument("--reward_scale", default=1, type=float,
                            help="The reward scaling factor introduced in SAC.")
    arg_parser.add_argument("--seed", default=123, type=int,
                            help="The randomness' seed for torch, numpy, random & gym[env].")
    cli_args = vars(arg_parser.parse_args())

    # Fixed hyper-parameters from the DIAYN and SAC papers.
    paper_defaults = {"lr": 3e-4,
                      "batch_size": 256,
                      "max_n_episodes": 5000,
                      "max_episode_len": 1000,
                      "gamma": 0.99,
                      "alpha": 0.1,
                      "tau": 0.005,
                      "n_hiddens": 300
                      }

    params = dict(cli_args)
    params.update(paper_defaults)
    return params
/Common/logger.py
import time
import numpy as np
import psutil
from torch.utils.tensorboard import SummaryWriter
import torch
import os
import datetime
import glob


class Logger:
    """Tracks training progress: console prints, TensorBoard scalars, and
    checkpoint saving/loading for the DIAYN agent."""

    def __init__(self, agent, **config):
        self.config = config
        self.agent = agent
        # e.g. "BipedalWalker/2020-01-01-12-00-00" — env name minus its
        # 3-char "-v3"-style suffix, plus a timestamp for this run.
        self.log_dir = self.config["env_name"][:-3] + "/" + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        self.start_time = 0
        self.duration = 0
        self.running_logq_zs = 0  # exponential moving average of log q(z|s)
        self.max_episode_reward = -np.inf
        self._turn_on = False
        self.to_gb = lambda in_bytes: in_bytes / 1024 / 1024 / 1024
        if self.config["do_train"] and self.config["train_from_scratch"]:
            self._create_wights_folder(self.log_dir)
            self._log_params()

    @staticmethod
    def _create_wights_folder(dir):
        # NOTE(review): "wights" is a typo for "weights" (internal name, kept).
        # NOTE(review): os.mkdir cannot create nested directories; `dir`
        # contains "<env>/<timestamp>", so this presumably relies on
        # "Checkpoints/<env>" already existing — confirm (os.makedirs is safer).
        if not os.path.exists("Checkpoints"):
            os.mkdir("Checkpoints")
        os.mkdir("Checkpoints/" + dir)

    def _log_params(self):
        # Record the full run configuration as TensorBoard text entries.
        with SummaryWriter("Logs/" + self.log_dir) as writer:
            for k, v in self.config.items():
                writer.add_text(k, str(v))

    def on(self):
        """(Re)start the episode timer; must be called before log()."""
        self.start_time = time.time()
        self._turn_on = True

    def _off(self):
        # Freeze the duration of the episode that just finished.
        self.duration = time.time() - self.start_time

    def log(self, *args):
        """Record one finished episode (prints, checkpoints, TensorBoard)."""
        if not self._turn_on:
            print("First you should turn the logger on once, via on() method to be able to log parameters.")
            return
        self._off()

        episode, episode_reward, skill, logq_zs, step, *rng_states = args

        self.max_episode_reward = max(self.max_episode_reward, episode_reward)

        # EMA of log q(z|s); seeded with the first observed value.
        if self.running_logq_zs == 0:
            self.running_logq_zs = logq_zs
        else:
            self.running_logq_zs = 0.99 * self.running_logq_zs + 0.01 * logq_zs

        ram = psutil.virtual_memory()
        # Abort before the replay buffer exhausts system memory.
        assert self.to_gb(ram.used) < 0.98 * self.to_gb(ram.total), "RAM usage exceeded permitted limit!"

        # Checkpoint three times per print interval.
        if episode % (self.config["interval"] // 3) == 0:
            self._save_weights(episode, *rng_states)

        if episode % self.config["interval"] == 0:
            print("E: {}| "
                  "Skill: {}| "
                  "E_Reward: {:.1f}| "
                  "EP_Duration: {:.2f}| "
                  "Memory_Length: {}| "
                  "Mean_steps_time: {:.3f}| "
                  "{:.1f}/{:.1f} GB RAM| "
                  "Time: {} ".format(episode,
                                     skill,
                                     episode_reward,
                                     self.duration,
                                     len(self.agent.memory),
                                     self.duration / step,
                                     self.to_gb(ram.used),
                                     self.to_gb(ram.total),
                                     datetime.datetime.now().strftime("%H:%M:%S"),
                                     ))

        with SummaryWriter("Logs/" + self.log_dir) as writer:
            writer.add_scalar("Max episode reward", self.max_episode_reward, episode)
            writer.add_scalar("Running logq(z|s)", self.running_logq_zs, episode)
            writer.add_histogram(str(skill), episode_reward)
            writer.add_histogram("Total Rewards", episode_reward)

        self.on()

    def _save_weights(self, episode, *rng_states):
        # Everything needed to resume: networks, optimizers, episode counter,
        # RNG streams, and running statistics.
        torch.save({"policy_network_state_dict": self.agent.policy_network.state_dict(),
                    "q_value_network1_state_dict": self.agent.q_value_network1.state_dict(),
                    "q_value_network2_state_dict": self.agent.q_value_network2.state_dict(),
                    "value_network_state_dict": self.agent.value_network.state_dict(),
                    "discriminator_state_dict": self.agent.discriminator.state_dict(),
                    "q_value1_opt_state_dict": self.agent.q_value1_opt.state_dict(),
                    "q_value2_opt_state_dict": self.agent.q_value2_opt.state_dict(),
                    "policy_opt_state_dict": self.agent.policy_opt.state_dict(),
                    "value_opt_state_dict": self.agent.value_opt.state_dict(),
                    "discriminator_opt_state_dict": self.agent.discriminator_opt.state_dict(),
                    "episode": episode,
                    "rng_states": rng_states,
                    "max_episode_reward": self.max_episode_reward,
                    "running_logq_zs": self.running_logq_zs
                    }, "Checkpoints/" + self.log_dir + "/params.pth")

    def load_weights(self):
        """Load the latest checkpoint for this environment.

        NOTE(review): the glob pattern matches the env directory itself, not
        its timestamped sub-directories, so the resulting path looks like
        "Checkpoints/<env>//params.pth" while _save_weights writes to
        "Checkpoints/<env>/<timestamp>/params.pth" — verify this actually
        resolves the intended checkpoint.
        """
        model_dir = glob.glob("Checkpoints/" + self.config["env_name"][:-3] + "/")
        model_dir.sort()
        checkpoint = torch.load(model_dir[-1] + "/params.pth")
        self.log_dir = model_dir[-1].split(os.sep)[-1]
        self.agent.policy_network.load_state_dict(checkpoint["policy_network_state_dict"])
        self.agent.q_value_network1.load_state_dict(checkpoint["q_value_network1_state_dict"])
        self.agent.q_value_network2.load_state_dict(checkpoint["q_value_network2_state_dict"])
        self.agent.value_network.load_state_dict(checkpoint["value_network_state_dict"])
        self.agent.discriminator.load_state_dict(checkpoint["discriminator_state_dict"])
        self.agent.q_value1_opt.load_state_dict(checkpoint["q_value1_opt_state_dict"])
        self.agent.q_value2_opt.load_state_dict(checkpoint["q_value2_opt_state_dict"])
        self.agent.policy_opt.load_state_dict(checkpoint["policy_opt_state_dict"])
        self.agent.value_opt.load_state_dict(checkpoint["value_opt_state_dict"])
        self.agent.discriminator_opt.load_state_dict(checkpoint["discriminator_opt_state_dict"])
        self.max_episode_reward = checkpoint["max_episode_reward"]
        self.running_logq_zs = checkpoint["running_logq_zs"]
        return checkpoint["episode"], self.running_logq_zs, *checkpoint["rng_states"]
/Common/play.py
# from mujoco_py.generated import const from mujoco_py import GlfwContext import cv2 import numpy as np import os GlfwContext(offscreen=True) class Play: def __init__(self, env, agent, n_skills): self.env = env self.agent = agent self.n_skills = n_skills self.agent.set_policy_net_to_cpu_mode() self.agent.set_policy_net_to_eval_mode() self.fourcc = cv2.VideoWriter_fourcc(*'XVID') if not os.path.exists("Vid/"): os.mkdir("Vid/") @staticmethod def concat_state_latent(s, z_, n): z_one_hot = np.zeros(n) z_one_hot[z_] = 1 return np.concatenate([s, z_one_hot]) def evaluate(self): for z in range(self.n_skills): video_writer = cv2.VideoWriter(f"Vid/skill{z}" + ".avi", self.fourcc, 50.0, (250, 250)) s = self.env.reset() s = self.concat_state_latent(s, z, self.n_skills) episode_reward = 0 for _ in range(self.env.spec.max_episode_steps): action = self.agent.choose_action(s) s_, r, done, _ = self.env.step(action) s_ = self.concat_state_latent(s_, z, self.n_skills) episode_reward += r if done: break s = s_ I = self.env.render(mode='rgb_array') I = cv2.cvtColor(I, cv2.COLOR_RGB2BGR) I = cv2.resize(I, (250, 250)) video_writer.write(I) print(f"skill: {z}, episode reward:{episode_reward:.1f}") video_writer.release() self.env.close() cv2.destroyAllWindows()
/main.py
"""Entry point: train a DIAYN/SAC agent or replay its learned skills."""
import gym
from Brain import SACAgent
from Common import Play, Logger, get_params
import numpy as np
from tqdm import tqdm
import mujoco_py


def concat_state_latent(s, z_, n):
    """Append a one-hot encoding of skill ``z_`` (out of ``n``) to state ``s``."""
    z_one_hot = np.zeros(n)
    z_one_hot[z_] = 1
    return np.concatenate([s, z_one_hot])


if __name__ == "__main__":
    params = get_params()

    # Probe the env once for observation/action dimensions and bounds.
    test_env = gym.make(params["env_name"])
    n_states = test_env.observation_space.shape[0]
    n_actions = test_env.action_space.shape[0]
    action_bounds = [test_env.action_space.low[0], test_env.action_space.high[0]]
    params.update({"n_states": n_states,
                   "n_actions": n_actions,
                   "action_bounds": action_bounds})
    print("params:", params)
    test_env.close()
    del test_env, n_states, n_actions, action_bounds

    env = gym.make(params["env_name"])

    # Uniform skill prior p(z), as in the DIAYN paper.
    p_z = np.full(params["n_skills"], 1 / params["n_skills"])
    agent = SACAgent(p_z=p_z, **params)
    logger = Logger(agent, **params)

    if params["do_train"]:

        if not params["train_from_scratch"]:
            # Resume: restore weights plus every RNG stream so the run is
            # reproducible from the checkpoint.
            episode, last_logq_zs, np_rng_state, *env_rng_states, torch_rng_state, random_rng_state = logger.load_weights()
            agent.hard_update_target_network()
            min_episode = episode
            np.random.set_state(np_rng_state)
            env.np_random.set_state(env_rng_states[0])
            env.observation_space.np_random.set_state(env_rng_states[1])
            env.action_space.np_random.set_state(env_rng_states[2])
            agent.set_rng_states(torch_rng_state, random_rng_state)
            print("Keep training from previous run.")
        else:
            min_episode = 0
            last_logq_zs = 0
            # Seed every randomness source for a fresh, reproducible run.
            np.random.seed(params["seed"])
            env.seed(params["seed"])
            env.observation_space.seed(params["seed"])
            env.action_space.seed(params["seed"])
            print("Training from scratch.")

        logger.on()
        for episode in tqdm(range(1 + min_episode, params["max_n_episodes"] + 1)):
            # One skill is sampled from the prior and fixed for the episode.
            z = np.random.choice(params["n_skills"], p=p_z)
            state = env.reset()
            state = concat_state_latent(state, z, params["n_skills"])
            episode_reward = 0
            logq_zses = []

            max_n_steps = min(params["max_episode_len"], env.spec.max_episode_steps)
            for step in range(1, 1 + max_n_steps):
                action = agent.choose_action(state)
                next_state, reward, done, _ = env.step(action)
                next_state = concat_state_latent(next_state, z, params["n_skills"])
                agent.store(state, z, done, action, next_state)
                logq_zs = agent.train()
                # train() returns None until the replay buffer is large
                # enough; reuse the last value so the episode mean is defined.
                if logq_zs is None:
                    logq_zses.append(last_logq_zs)
                else:
                    logq_zses.append(logq_zs)
                episode_reward += reward
                state = next_state
                if done:
                    break

            logger.log(episode,
                       episode_reward,
                       z,
                       sum(logq_zses) / len(logq_zses),
                       step,
                       np.random.get_state(),
                       env.np_random.get_state(),
                       env.observation_space.np_random.get_state(),
                       env.action_space.np_random.get_state(),
                       *agent.get_rng_states(),
                       )

    else:
        # Evaluation: load the checkpoint and record a video per skill.
        logger.load_weights()
        player = Play(env, agent, n_skills=params["n_skills"])
        player.evaluate()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
CellProfiling/cpias
refs/heads/master
{"/cpias/__init__.py": ["/cpias/server.py", "/cpias/message.py"], "/cpias/cli/server.py": ["/cpias/server.py", "/cpias/cli/common.py"], "/cpias/commands/hello.py": ["/cpias/server.py", "/cpias/commands/__init__.py", "/cpias/process.py", "/cpias/message.py"], "/cpias/process.py": ["/cpias/server.py", "/cpias/exceptions.py"], "/cpias/server.py": ["/cpias/commands/__init__.py", "/cpias/message.py"], "/cpias/cli/client.py": ["/cpias/cli/common.py", "/cpias/client.py"], "/cpias/cli/__init__.py": ["/cpias/cli/client.py", "/cpias/cli/server.py"], "/cpias/commands/__init__.py": ["/cpias/message.py"], "/tests/test_message.py": ["/cpias/message.py"]}
└── ├── cpias │ ├── __init__.py │ ├── cli │ │ ├── __init__.py │ │ ├── client.py │ │ ├── common.py │ │ └── server.py │ ├── client.py │ ├── commands │ │ ├── __init__.py │ │ └── hello.py │ ├── const.py │ ├── exceptions.py │ ├── message.py │ ├── process.py │ └── server.py ├── setup.py └── tests └── test_message.py
/cpias/__init__.py
"""Provide a server for image analysis.""" from .const import VERSION from .message import Message from .server import CPIAServer __all__ = ["Message", "CPIAServer"] __version__ = VERSION
/cpias/cli/__init__.py
# type: ignore """Provide a CLI.""" import logging import click from cpias import __version__ from cpias.cli.client import run_client from cpias.cli.server import start_server SETTINGS = dict(help_option_names=["-h", "--help"]) @click.group( options_metavar="", subcommand_metavar="<command>", context_settings=SETTINGS ) @click.option("--debug", is_flag=True, help="Start server in debug mode.") @click.version_option(__version__) @click.pass_context def cli(ctx, debug): """Run CPIAS server.""" ctx.obj = {} ctx.obj["debug"] = debug if debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) cli.add_command(start_server) cli.add_command(run_client)
/cpias/cli/client.py
# type: ignore """Provide a CLI to start a client.""" import asyncio import click from cpias.cli.common import common_tcp_options from cpias.client import tcp_client DEFAULT_MESSAGE = '{"cli": "client-1", "cmd": "hello", "dta": {"planet": "world"}}\n' @click.command(options_metavar="<options>") @click.option("--message", default=DEFAULT_MESSAGE, help="Message to send to server.") @common_tcp_options @click.pass_context def run_client(ctx, message, host, port): """Run an async tcp client to connect to the server.""" debug = ctx.obj["debug"] asyncio.run( tcp_client(message, host=host, port=port), debug=debug, )
/cpias/cli/common.py
# type: ignore """Provide common CLI options.""" import click def common_tcp_options(func): """Supply common tcp connection options.""" func = click.option( "-p", "--port", default=8555, show_default=True, type=int, help="TCP port of the connection.", )(func) func = click.option( "-H", "--host", default="127.0.0.1", show_default=True, help="TCP address of the server.", )(func) return func
/cpias/cli/server.py
# type: ignore """Provide a CLI to start the server.""" import asyncio import click from cpias.cli.common import common_tcp_options from cpias.server import CPIAServer @click.command(options_metavar="<options>") @common_tcp_options @click.pass_context def start_server(ctx, host, port): """Start an async tcp server.""" debug = ctx.obj["debug"] server = CPIAServer(host=host, port=port) try: asyncio.run(server.start(), debug=debug) except KeyboardInterrupt: asyncio.run(server.stop(), debug=debug)
/cpias/client.py
"""Provide a test client for the CPIAServer.""" import asyncio from cpias.const import LOGGER async def tcp_client(message: str, host: str = "127.0.0.1", port: int = 8555) -> None: """Connect to server and send message.""" reader, writer = await asyncio.open_connection(host, port) data = await reader.readline() version_msg = data.decode() LOGGER.debug("Version message: %s", version_msg.strip()) LOGGER.info("Send: %r", message) writer.write(message.encode()) await writer.drain() data = await reader.readline() LOGGER.info("Received: %r", data.decode()) LOGGER.debug("Closing the connection") writer.close() await writer.wait_closed() if __name__ == "__main__": asyncio.run( tcp_client('{"cli": "client-1", "cmd": "hello", "dta": {"planet": "world"}}\n'), debug=True, ) asyncio.run( tcp_client( '{"cli": "client-1", "cmd": "hello_slow", "dta": {"planet": "slow"}}\n' ), debug=True, ) asyncio.run( tcp_client( '{"cli": "client-1", "cmd": "hello_persistent", ' '"dta": {"planet": "Mars"}}\n' ), debug=True, ) asyncio.run( tcp_client( '{"cli": "client-1", "cmd": "hello_process", ' '"dta": {"planet": "Neptune"}}\n' ), debug=True, )
/cpias/commands/__init__.py
"""Provide commands to the server.""" from functools import wraps from types import ModuleType from typing import Callable, Mapping import pkg_resources import voluptuous as vol from voluptuous.humanize import humanize_error from cpias.const import LOGGER from cpias.message import Message def get_commands() -> Mapping[str, ModuleType]: """Return a dict of command modules.""" commands = { entry_point.name: entry_point.load() for entry_point in pkg_resources.iter_entry_points("cpias.commands") } return commands def validate(schema: dict) -> Callable: """Return a decorator for argument validation.""" vol_schema = vol.Schema(schema) def decorator(func: Callable) -> Callable: """Decorate a function and validate its arguments.""" @wraps(func) async def check_args(server, message, **data): # type: ignore """Check arguments.""" try: data = vol_schema(data) except vol.Invalid as exc: err = humanize_error(data, exc) LOGGER.error( "Received invalid data for command %s: %s", message.command, err ) return Message(client=message.client, command="invalid", data=data) return await func(server, message, **data) return check_args return decorator
/cpias/commands/hello.py
"""Provide the hello command.""" from typing import TYPE_CHECKING, Callable, Optional, Tuple from cpias.commands import validate from cpias.const import LOGGER from cpias.message import Message from cpias.process import ReceiveError, create_process if TYPE_CHECKING: from cpias.server import CPIAServer # pylint: disable=unused-argument def register_command(server: "CPIAServer") -> None: """Register the hello command.""" server.register_command("hello", hello) server.register_command("hello_slow", hello_slow) server.register_command("hello_persistent", hello_persistent) server.register_command("hello_process", hello_process) @validate({"planet": str}) async def hello( server: "CPIAServer", message: Message, planet: Optional[str] = None ) -> Message: """Run the hello command.""" if planet is None: planet = "Jupiter" LOGGER.info("Hello %s!", planet) return message @validate({"planet": str}) async def hello_slow( server: "CPIAServer", message: Message, planet: Optional[str] = None ) -> Message: """Run the slow hello command.""" if planet is None: planet = "Jupiter" result = await server.run_process_job(do_cpu_work) LOGGER.info("Hello %s! The result is %s", planet, result) reply = message.copy() reply.data["result"] = result return reply @validate({"planet": str}) async def hello_persistent( server: "CPIAServer", message: Message, planet: Optional[str] = None ) -> Message: """Run the persistent hello command. This command creates a state the first time it's run. """ if planet is None: planet = "Jupiter" if "hello_persistent_state" not in server.store: server.store["hello_persistent_state"] = create_state() command_task = server.store["hello_persistent_state"] old_planet, new_planet = command_task(planet) LOGGER.info( "Hello! The old planet was %s. 
The new planet is %s", old_planet, new_planet ) reply = message.copy() reply.data["old_planet"] = old_planet reply.data["new_planet"] = new_planet return reply @validate({"planet": str}) async def hello_process( server: "CPIAServer", message: Message, planet: Optional[str] = None ) -> Message: """Run the process hello command. This command creates a process the first time it's run. """ if planet is None: planet = "Jupiter" if "hello_process" not in server.store: server.store["hello_process"] = create_process(server, create_state) recv, send = server.store["hello_process"] await send(planet) try: old_planet, new_planet = await recv() except ReceiveError: return message LOGGER.info( "Hello! The old planet was %s. The new planet is %s", old_planet, new_planet ) reply = message.copy() reply.data["old_planet"] = old_planet reply.data["new_planet"] = new_planet return reply def do_cpu_work() -> int: """Do work that should run in the process pool.""" return sum(i * i for i in range(10 ** 7)) def create_state() -> Callable: """Initialize state.""" state: str = "init" def change_state(new_state: str) -> Tuple[str, str]: """Do work that should change state.""" nonlocal state old_state = state state = new_state return old_state, new_state return change_state
/cpias/const.py
"""Provide constants for cpias.""" import logging from pathlib import Path VERSION = (Path(__file__).parent / "VERSION").read_text().strip() API_VERSION = "1.0.0" LOGGER = logging.getLogger(__package__)
/cpias/exceptions.py
"""Provide exceptions.""" class CPIASError(Exception): """Represent a generic CPIAS exception."""
/cpias/message.py
"""Provide a model for messages sent and received by the server.""" from __future__ import annotations import json from enum import Enum from typing import Optional, cast from .const import LOGGER class Message: """Represent a client/server message.""" def __init__(self, *, client: str, command: str, data: dict) -> None: """Set up message instance.""" self.client = client self.command = command self.data = data self.copy = self.__copy__ def __copy__(self) -> Message: """Copy message.""" msg_data = self.encode() new_msg = cast(Message, self.decode(msg_data)) return new_msg def __repr__(self) -> str: """Return the representation.""" return ( f"{type(self).__name__}(client={self.client}, command={self.command}, " f"data={self.data})" ) @classmethod def decode(cls, data: str) -> Optional[Message]: """Decode data into a message.""" # '{"cli": "client-1", "cmd": "hello", "dta": {"param1": "world"}}' try: parsed_data = json.loads(data.strip()) except ValueError: LOGGER.error("Failed to parse message data: %s", data) return None if not isinstance(parsed_data, dict): LOGGER.error("Incorrect message data: %s", parsed_data) return None params: dict = { block.name: parsed_data.get(block.value) for block in MessageBlock } return cls(**params) def encode(self) -> str: """Encode message into a data string.""" compiled_msg = {attr.value: getattr(self, attr.name) for attr in MessageBlock} return f"{json.dumps(compiled_msg)}\n" class MessageBlock(Enum): """Represent a message block.""" client = "cli" command = "cmd" data = "dta"
/cpias/process.py
"""Provide process tools.""" import asyncio import signal from multiprocessing import Pipe, Process from multiprocessing.connection import Connection from time import sleep from typing import TYPE_CHECKING, Any, Callable, Dict, Tuple from cpias.const import LOGGER from cpias.exceptions import CPIASError if TYPE_CHECKING: from cpias.server import CPIAServer class ReceiveError(CPIASError): """Error raised when receving from a process failed.""" def create_process( server: "CPIAServer", create_callback: Callable, *args: Any ) -> Tuple[Callable, Callable]: """Create a persistent process.""" parent_conn, child_conn = Pipe() prc = Process(target=func_wrapper, args=(create_callback, child_conn, *args)) prc.start() def stop_process() -> None: """Stop process.""" prc.terminate() server.on_stop(stop_process) async def async_recv() -> Any: """Receive data from the process connection asynchronously.""" while True: if not prc.is_alive() or parent_conn.poll(): break await asyncio.sleep(0.5) if not prc.is_alive(): raise ReceiveError try: return await server.add_executor_job(parent_conn.recv) except EOFError as exc: LOGGER.debug("Nothing more to receive") raise ReceiveError from exc async def async_send(data: Dict[Any, Any]) -> None: """Send data to the process.""" parent_conn.send(data) return async_recv, async_send def func_wrapper(create_callback: Callable, conn: Connection, *args: Any) -> None: """Wrap a function with connection to receive and send data.""" running = True # pylint: disable=unused-argument def handle_signal(signum: int, frame: Any) -> None: """Handle signal.""" nonlocal running running = False conn.close() signal.signal(signal.SIGTERM, handle_signal) signal.signal(signal.SIGINT, handle_signal) try: callback = create_callback(*args) except Exception as exc: # pylint: disable=broad-except LOGGER.error("Failed to create callback: %s", exc) return while running: while running: if conn.poll(): break sleep(0.5) try: data = conn.recv() except EOFError: 
LOGGER.debug("Nothing more to receive") break except OSError: LOGGER.debug("Connection is closed") break try: result = callback(data) except Exception as exc: # pylint: disable=broad-except LOGGER.error("Failed to run callback: %s", exc) break if not running: break try: conn.send(result) except ValueError: LOGGER.error("Failed to send result %s", result) except OSError: LOGGER.debug("Connection is closed") break LOGGER.debug("Exiting process")
/cpias/server.py
"""Provide an image analysis server.""" import asyncio import concurrent.futures import logging from typing import Any, Callable, Coroutine, Dict, Optional from .commands import get_commands from .const import API_VERSION, LOGGER, VERSION from .message import Message class CPIAServer: """Represent an image analysis server.""" # pylint: disable=too-many-instance-attributes def __init__(self, host: str = "localhost", port: int = 8555) -> None: """Set up server instance.""" self.host = host self.port = port self.server: Optional[asyncio.AbstractServer] = None self.serv_task: Optional[asyncio.Task] = None self.commands: Dict[str, Callable] = {} self._on_stop_callbacks: list = [] self._pending_tasks: list = [] self._track_tasks = False self.store: dict = {} async def start(self) -> None: """Start server.""" LOGGER.debug("Starting server") commands = get_commands() for module in commands.values(): module.register_command(self) # type: ignore server = await asyncio.start_server( self.handle_conn, host=self.host, port=self.port ) self.server = server async with server: self.serv_task = asyncio.create_task(server.serve_forever()) LOGGER.info("Serving at %s:%s", self.host, self.port) await self.serv_task async def stop(self) -> None: """Stop the server.""" LOGGER.info("Server shutting down") self._track_tasks = True for stop_callback in self._on_stop_callbacks: stop_callback() self._on_stop_callbacks.clear() await self.wait_for_tasks() if self.serv_task is not None: self.serv_task.cancel() await asyncio.sleep(0) # Let the event loop cancel the task. 
def on_stop(self, callback: Callable) -> None: """Register a callback that should be called on server stop.""" self._on_stop_callbacks.append(callback) def register_command(self, command_name: str, command_func: Callable) -> None: """Register a command function.""" self.commands[command_name] = command_func async def handle_conn( self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter ) -> None: """Handle a connection.""" # Send server version and server api version as welcome message. version_msg = f"CPIAServer version: {VERSION}, api version: {API_VERSION}\n" writer.write(version_msg.encode()) await writer.drain() await self.handle_comm(reader, writer) LOGGER.debug("Closing the connection") writer.close() await writer.wait_closed() async def handle_comm( self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter ) -> None: """Handle communication between client and server.""" addr = writer.get_extra_info("peername") while True: data = await reader.readline() if not data: break msg = Message.decode(data.decode()) if not msg: # TODO: Send invalid message message. # pylint: disable=fixme continue cmd_func = self.commands.get(msg.command) if cmd_func is None: LOGGER.warning("Received unknown command %s from %s", msg.command, addr) # TODO: Send unknown command message. # pylint: disable=fixme continue LOGGER.debug("Received %s from %s", msg, addr) LOGGER.debug("Executing command %s", msg.command) reply = await cmd_func(self, msg, **msg.data) LOGGER.debug("Sending: %s", reply) data = reply.encode().encode() writer.write(data) await writer.drain() def add_executor_job(self, func: Callable, *args: Any) -> Coroutine: """Schedule a function to be run in the thread pool. Return a task. 
""" loop = asyncio.get_running_loop() task = loop.run_in_executor(None, func, *args) if self._track_tasks: self._pending_tasks.append(task) return task async def run_process_job(self, func: Callable, *args: Any) -> Any: """Run a job in the process pool.""" loop = asyncio.get_running_loop() with concurrent.futures.ProcessPoolExecutor() as pool: task = loop.run_in_executor(pool, func, *args) if self._track_tasks: self._pending_tasks.append(task) result = await task return result def create_task(self, coro: Coroutine) -> asyncio.Task: """Schedule a coroutine on the event loop. Use this helper to make sure the task is cancelled on server stop. Return a task. """ task = asyncio.create_task(coro) if self._track_tasks: self._pending_tasks.append(task) return task async def wait_for_tasks(self) -> None: """Wait for all pending tasks.""" await asyncio.sleep(0) while self._pending_tasks: LOGGER.debug("Waiting for pending tasks") pending = [task for task in self._pending_tasks if not task.done()] self._pending_tasks.clear() if pending: await asyncio.wait(pending) else: await asyncio.sleep(0) def main() -> None: """Run server.""" logging.basicConfig(level=logging.DEBUG, format="%(name)s: %(message)s") server = CPIAServer() try: asyncio.run(server.start(), debug=True) except KeyboardInterrupt: asyncio.run(server.stop(), debug=True) if __name__ == "__main__": main()
/setup.py
"""Set up file for cpias package.""" from pathlib import Path from setuptools import find_packages, setup PROJECT_DIR = Path(__file__).parent.resolve() README_FILE = PROJECT_DIR / "README.md" LONG_DESCR = README_FILE.read_text(encoding="utf-8") VERSION = (PROJECT_DIR / "cpias" / "VERSION").read_text().strip() GITHUB_URL = "https://github.com/CellProfiling/cpias" DOWNLOAD_URL = f"{GITHUB_URL}/archive/master.zip" setup( name="cpias", version=VERSION, description="Provide a server for image analysis", long_description=LONG_DESCR, long_description_content_type="text/markdown", author="Martin Hjelmare", author_email="marhje52@gmail.com", url=GITHUB_URL, download_url=DOWNLOAD_URL, packages=find_packages(exclude=["contrib", "docs", "tests*"]), python_requires=">=3.7", install_requires=["click", "voluptuous"], include_package_data=True, entry_points={ "console_scripts": ["cpias = cpias.cli:cli"], "cpias.commands": ["hello = cpias.commands.hello"], }, license="Apache-2.0", zip_safe=False, classifiers=[ "Development Status :: 3 - Alpha", "Framework :: AsyncIO", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering", ], )
/tests/test_message.py
"""Provide tests for message.""" from cpias.message import Message def test_message_decode(): """Test message decode.""" msg = Message.decode( '{"cli": "client-1", "cmd": "hello", "dta": {"param1": "world"}}' ) assert msg.client == "client-1" assert msg.command == "hello" assert msg.data == {"param1": "world"} def test_decode_bad_message(): """Test decode bad message.""" msg = Message.decode("bad") assert not msg msg = Message.decode('["val1", "val2"]') assert not msg def test_message_encode(): """Test message encode.""" msg_string = '{"cli": "client-1", "cmd": "hello", "dta": {"param1": "world"}}\n' msg = Message.decode(msg_string) msg_encoded = msg.encode() assert msg_encoded == msg_string
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Kw4dr4t/WebMovies
refs/heads/master
{"/WebMovies/admin.py": ["/WebMovies/models.py"], "/WebMovies/views.py": ["/WebMovies/models.py"]}
└── └── WebMovies ├── admin.py ├── apps.py ├── migrations │ ├── 0003_movie_description.py │ ├── 0004_auto_20210204_0835.py │ ├── 0005_auto_20210209_0759.py │ └── 0006_auto_20210209_1401.py ├── models.py └── views.py
/WebMovies/admin.py
from django.contrib import admin

from .models import AdditionalInfo, Movie

# Register your models here.
# admin.site.register(Movie)


@admin.register(Movie)
class MovieAdmin(admin.ModelAdmin):
    """Admin configuration for Movie: list columns, year filter, title search."""

    # fields = ["Title", "Description", "Year"]
    # exclude = ["Description"]
    list_display = ["title", "imdb_rating", "year"]
    list_filter = ("year",)
    search_fields = ("title",)


# AdditionalInfo uses the default ModelAdmin.
admin.site.register(AdditionalInfo)
/WebMovies/apps.py
from django.apps import AppConfig


class WebmoviesConfig(AppConfig):
    """Django application configuration for the WebMovies app."""

    name = 'WebMovies'
/WebMovies/migrations/0003_movie_description.py
# Generated by Django 3.1.6 on 2021-02-04 08:22 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('WebMovies', '0002_auto_20210204_0806'), ] operations = [ migrations.AddField( model_name='movie', name='description', field=models.TextField(default=''), ), ]
/WebMovies/migrations/0004_auto_20210204_0835.py
# Generated by Django 3.1.6 on 2021-02-04 08:35 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('WebMovies', '0003_movie_description'), ] operations = [ migrations.AddField( model_name='movie', name='imdb_rating', field=models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True), ), migrations.AddField( model_name='movie', name='poster', field=models.ImageField(blank=True, null=True, upload_to='posters'), ), migrations.AddField( model_name='movie', name='premiere', field=models.DateField(blank=True, null=True), ), ]
/WebMovies/migrations/0005_auto_20210209_0759.py
# Generated by Django 3.1.6 on 2021-02-09 07:59 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('WebMovies', '0004_auto_20210204_0835'), ] operations = [ migrations.CreateModel( name='AdditionalInfo', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('duration', models.PositiveIntegerField(default=0)), ('genre', models.PositiveSmallIntegerField(choices=[(8, 'Historical'), (4, 'Crime'), (3, 'Comedy'), (5, 'Drama'), (11, 'Science Fiction'), (0, 'Other'), (9, 'Horror'), (1, 'Action'), (6, 'Experimental'), (10, 'Romance'), (7, 'Fantasy'), (12, 'Thriller'), (13, 'Wester'), (2, 'Animation')], default=0)), ], ), migrations.AddField( model_name='movie', name='additional_info', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='WebMovies.additionalinfo'), ), ]
/WebMovies/migrations/0006_auto_20210209_1401.py
# Generated by Django 3.1.6 on 2021-02-09 14:01 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('WebMovies', '0005_auto_20210209_0759'), ] operations = [ migrations.AlterField( model_name='additionalinfo', name='genre', field=models.PositiveSmallIntegerField(choices=[(8, 'Historical'), (4, 'Crime'), (7, 'Fantasy'), (3, 'Comedy'), (13, 'Wester'), (11, 'Science Fiction'), (10, 'Romance'), (5, 'Drama'), (2, 'Animation'), (0, 'Other'), (12, 'Thriller'), (9, 'Horror'), (6, 'Experimental'), (1, 'Action')], default=0), ), ]
/WebMovies/models.py
from django.db import models


class AdditionalInfo(models.Model):
    """Extra per-movie data: duration and genre."""

    # Choices as an ordered tuple, NOT a set: a set literal has no stable
    # iteration order, so Django saw "changed" choices on every makemigrations
    # run and produced the spurious reorder migrations 0005/0006.
    GENRES = (
        (0, "Other"),
        (1, "Action"),
        (2, "Animation"),
        (3, "Comedy"),
        (4, "Crime"),
        (5, "Drama"),
        (6, "Experimental"),
        (7, "Fantasy"),
        (8, "Historical"),
        (9, "Horror"),
        (10, "Romance"),
        (11, "Science Fiction"),
        (12, "Thriller"),
        # NOTE(review): likely a typo for "Western"; kept as-is because
        # changing the label would require a new migration.
        (13, "Wester"),
    )

    # Duration of the movie; units not declared here — presumably minutes,
    # TODO confirm against the UI/forms.
    duration = models.PositiveIntegerField(default=0)
    genre = models.PositiveSmallIntegerField(default=0, choices=GENRES)


class Movie(models.Model):
    """A movie with optional poster, rating, premiere date and extra info."""

    title = models.CharField(max_length=64, blank=False, unique=True)
    year = models.PositiveSmallIntegerField(default=2000, blank=True)
    description = models.TextField(default="")
    premiere = models.DateField(auto_now=False, null=True, blank=True)
    imdb_rating = models.DecimalField(
        max_digits=4, decimal_places=2, null=True, blank=True
    )
    poster = models.ImageField(upload_to="posters", null=True, blank=True)
    additional_info = models.OneToOneField(
        AdditionalInfo, on_delete=models.CASCADE, null=True, blank=True
    )

    def __str__(self):
        return self.title_with_year()

    def title_with_year(self):
        """Return the title followed by the year in parentheses."""
        return "{} ({})".format(self.title, self.year)
/WebMovies/views.py
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse
from WebMovies.models import Movie
from .forms import MovieForm
from django.contrib.auth.decorators import login_required


def all_movies(request):
    """List every movie."""
    movies_all = Movie.objects.all()
    return render(request, "movies.html", {"movies": movies_all})


@login_required
def new_movie(request):
    """Create a movie; on a valid POST save it and return to the list."""
    # Bound form on POST, unbound otherwise (request.POST is falsy when empty).
    form = MovieForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        form.save()
        return redirect(all_movies)
    return render(request, "movie_form.html", {"form": form, "new": True})


@login_required
def edit_movie(request, id):
    """Edit an existing movie identified by primary key, or 404."""
    movie = get_object_or_404(Movie, pk=id)
    form = MovieForm(request.POST or None, request.FILES or None, instance=movie)
    if form.is_valid():
        form.save()
        return redirect(all_movies)
    return render(request, "movie_form.html", {"form": form, "new": False})


@login_required
def delete_movie(request, id):
    """Delete a movie after a POSTed confirmation; GET shows the confirm page."""
    movie = get_object_or_404(Movie, pk=id)
    if request.method == "POST":
        movie.delete()
        return redirect(all_movies)
    return render(request, "confirm.html", {"movie": movie})
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
tduproject/kagikko2
refs/heads/master
{"/tdu/profiles/views.py": ["/tdu/profiles/forms.py", "/tdu/profiles/models.py"], "/tdu/accounts/views.py": ["/tdu/accounts/forms.py", "/tdu/profiles/models.py"], "/tdu/keijiban/views.py": ["/tdu/profiles/models.py", "/tdu/keijiban/forms.py", "/tdu/polls/models.py", "/tdu/keijiban/models.py"], "/tdu/profiles/admin.py": ["/tdu/profiles/models.py"], "/tdu/profiles/forms.py": ["/tdu/profiles/models.py"], "/tdu/timetable/admin.py": ["/tdu/timetable/models.py"], "/tdu/timetable/views.py": ["/tdu/timetable/models.py", "/tdu/app/models.py"], "/tdu/app/views.py": ["/tdu/app/models.py", "/tdu/polls/models.py"], "/tdu/polls/admin.py": ["/tdu/polls/models.py"], "/tdu/polls/views.py": ["/tdu/polls/models.py"], "/tdu/home/views.py": ["/tdu/polls/models.py", "/tdu/keijiban/models.py"], "/tdu/keijiban/admin.py": ["/tdu/keijiban/models.py"], "/tdu/keijiban/forms.py": ["/tdu/keijiban/models.py"]}
└── └── tdu ├── accounts │ ├── aes.py │ ├── forms.py │ └── views.py ├── app │ ├── migrations │ │ ├── 0001_initial.py │ │ └── 0002_auto_20170708_0057.py │ ├── models.py │ ├── urls.py │ └── views.py ├── home │ └── views.py ├── keijiban │ ├── admin.py │ ├── forms.py │ ├── models.py │ ├── urls.py │ └── views.py ├── polls │ ├── admin.py │ ├── models.py │ ├── urls.py │ └── views.py ├── profiles │ ├── admin.py │ ├── forms.py │ ├── migrations │ │ ├── 0003_auto_20170703_1040.py │ │ ├── 0004_auto_20170703_1043.py │ │ ├── 0005_auto_20170703_1142.py │ │ ├── 0006_auto_20170703_2051.py │ │ └── 0007_auto_20170703_2053.py │ ├── models.py │ ├── urls.py │ └── views.py └── timetable ├── admin.py ├── migrations │ └── 0001_initial.py ├── models.py ├── urls.py └── views.py
/tdu/accounts/aes.py
import codecs

from Crypto.Cipher import AES


class aesEncryption:
    """AES-CTR encrypt/decrypt helpers with a hard-coded key.

    NOTE(review): several security problems are visible here and should be
    confirmed/fixed by the owners:
      * The variable names are swapped: the 32-byte value called ``counter``
        is passed to AES.new as the KEY, and the 16-byte ``secret`` is used
        as the counter block.
      * ``counter=lambda: secret`` returns the SAME counter block for every
        16-byte block and every message, so the CTR keystream repeats —
        identical plaintext blocks produce identical ciphertext.
      * The key is hard-coded in source instead of coming from configuration.
    """

    # def PaddingMes(self,mes):
    #     mes_length = len(mes)
    #     len = round(mes_length / 16)  # rounded
    #     new_mes = mes + " "*len
    #     return new_mes

    def Encrypt(self,mes1):
        secret = (b'\xf0\x0e3nE\xa1\x9a\xff\x7f\xf6r\xd6\xf4\x9c\xa9\xaa')
        counter = (b'\xa7\r\xa5u\xd4\xa0h\xb2\x04\x19<8\x8e\xc6$\x82\xc8\x7f\xe9\x99\x0b3\xe3\x05\xe8\x999j-\xf1\xf7\xd5')
        crypto = AES.new(counter, AES.MODE_CTR, counter=lambda: secret)
        mes_length = len(mes1)
        # NOTE(review): CTR mode needs no padding; this appends
        # round(len/16) spaces, which Decrypt later strips — confirm intent.
        leng = round(mes_length / 16)  # rounded
        mes = mes1 + " " * leng
        encrypted = crypto.encrypt(mes)
        return encrypted

    def Decrypt(self,mes2):
        secret = (b'\xf0\x0e3nE\xa1\x9a\xff\x7f\xf6r\xd6\xf4\x9c\xa9\xaa')
        counter = (
            b'\xa7\r\xa5u\xd4\xa0h\xb2\x04\x19<8\x8e\xc6$\x82\xc8\x7f\xe9\x99\x0b3\xe3\x05\xe8\x999j-\xf1\xf7\xd5')
        crypto = AES.new(counter, AES.MODE_CTR, counter=lambda: secret)
        mes = crypto.decrypt(mes2)
        mes = codecs.decode(mes, 'utf-8')
        # Strip the space padding added by Encrypt.
        decrypt = mes.strip()
        return decrypt
/tdu/accounts/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponse
from pprint import pprint


class RegisterForm(UserCreationForm):
    """Registration form where ``username`` holds the e-mail address."""

    # Django's default login authenticates by username and password, so this
    # project treats first_name as the display name and username as the
    # e-mail address.  required=True would make a field mandatory.
    # first_name = forms.CharField(label="ユーザーネーム", required=True)

    class Meta:
        model = User
        fields = (
            "username","password1","password2", "email", "first_name",
        )

    def __init__(self, *args,**kwargs):
        """Attach Bootstrap classes and placeholders to the widgets."""
        super().__init__(*args, **kwargs)
        self.fields['username'].widget.attrs['class'] = 'form-control'
        self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'
        # self.fields['first_name'].widget.attrs['class'] = 'form-control'
        # self.fields['first_name'].widget.attrs['placeholder'] = 'ユーザーネーム'
        self.fields['password1'].widget.attrs['class'] = 'form-control'
        self.fields['password1'].widget.attrs['placeholder'] = 'パスワード'
        self.fields['password2'].widget.attrs['class'] = 'form-control'
        self.fields['password2'].widget.attrs['placeholder'] = 'パスワード(確認)'

    def clean_username(self):
        """Validate that username is an unused university e-mail address."""
        username = self.cleaned_data["username"]
        atmark = username.find('@')
        string = username.find("dendai.ac.jp")
        if(atmark < 0):
            raise ValidationError("正しいメールアドレスを指定してください。")
        # NOTE(review): when "dendai.ac.jp" is absent, find() returns -1, so
        # this rejects; when present before the '@' it passes — confirm that
        # ordering is the intended check.
        if(atmark > string and string < 0):
            raise ValidationError("電大メールを入力してください")
        # try:
        #     validate_email(username)
        # except ValidationError:
        #     raise ValidationError("正しいメールアドレスを指定してください。")
        try:
            self.user = User.objects.get(username=username)
        except User.DoesNotExist:
            # No existing account with this address: accept it.
            return username
        else:
            raise ValidationError("既に存在するメールアドレスです。")


class LoginForm(AuthenticationForm):
    """Login form; ``username`` is treated as the e-mail address."""

    # def __init__(self, *args, **kwargs):
    #     super().__init__(*args, **kwargs)
    #     self.fields['username'].widget.attrs['class'] = 'form-control'
    #     self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'
    #
    #     self.fields['password'].widget.attrs['class'] = 'form-control'
    #     self.fields['password'].widget.attrs['placeholder'] = 'パスワード'

    def __init__(self, *args, **kwargs):
        """Attach Bootstrap classes and placeholders to the widgets."""
        super().__init__(*args, **kwargs)
        self.fields['username'].widget.attrs['class'] = 'form-control'
        self.fields['username'].widget.attrs['placeholder'] = 'メールアドレス'
        self.fields['password'].widget.attrs['class'] = 'form-control'
        self.fields['password'].widget.attrs['placeholder'] = 'パスワード'


class ForgetPasswordForm(PasswordResetForm):
    """Password-reset request form (asks for the e-mail address)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['email'].widget.attrs['class'] = 'form-control'
        self.fields['email'].widget.attrs['placeholder'] = 'メールアドレス'


class ChangePasswordForm(PasswordChangeForm):
    """Password-change form for logged-in users."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['new_password1'].widget.attrs['class'] = 'form-control'
        self.fields['new_password2'].widget.attrs['class'] = 'form-control'
        self.fields['old_password'].widget.attrs['class'] = 'form-control'


class PasswordConfirmForm(SetPasswordForm):
    """Form for setting a new password from a reset link."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['new_password1'].widget.attrs['class'] = 'form-control'
        self.fields['new_password1'].widget.attrs['placeholder'] = '新パスワード'
        self.fields['new_password2'].widget.attrs['class'] = 'form-control'
        self.fields['new_password2'].widget.attrs['placeholder'] = '新パスワード(確認)'
/tdu/accounts/views.py
from django.conf import settings
from django.contrib.auth import views as auth_views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
#from myUserModel.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.template.loader import get_template
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.views import generic
from profiles.models import UserProfile
from pprint import pprint
from django.http import HttpResponse

from .forms import (
    RegisterForm,
    LoginForm,
    ChangePasswordForm,
    ForgetPasswordForm,
    PasswordConfirmForm,
)


# User registration
class CreateUserView(generic.FormView):
    """Register a user as inactive and e-mail an activation link."""

    template_name = 'accounts/create.html'
    form_class = RegisterForm
    success_url = reverse_lazy('accounts:create_done')

    def form_valid(self,form):
        """Save the inactive user and send the activation mail."""
        user = form.save(commit=False)
        # Account stays inactive until the e-mailed token is confirmed.
        user.is_active = False
        user.email = user.username
        user.save()
        current_site = get_current_site(self.request)
        domain = current_site.domain
        # subject_template = get_template('mailtemplate/subject.txt')
        message_template = get_template('mailtemplate/message.txt')
        # uid/token pair lets CreateCompleteView verify the activation link.
        context = {
            'protocol': 'https' if self.request.is_secure() else 'http',
            'domain': domain,
            'uid': urlsafe_base64_encode(force_bytes(user.pk)),
            'token': default_token_generator.make_token(user),
            'user': user,
        }
        #subject = subject_template.render(context)
        message = message_template.render(context)
        from_email = settings.EMAIL_HOST_USER
        to = [user.username]
        send_mail('ご登録ありがとうございます', message, from_email, to )
        return super(CreateUserView, self).form_valid(form)


class CreateDoneView(generic.TemplateView):
    """Static page shown after the activation mail has been sent."""

    template_name = "accounts/create_done.html"


class CreateCompleteView(generic.TemplateView):
    """Activate the account referenced by the uid/token in the URL."""

    template_name = 'accounts/create_complete.html'

    def get(self, request, **kwargs):
        """Verify the token; on success activate the user and create a profile."""
        token = kwargs.get("token")
        uidb64 = kwargs.get("uidb64")
        try:
            uid = force_text(urlsafe_base64_decode(uidb64))
            user = User.objects.get(pk=uid)
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            user = None
        if user and not user.is_active and default_token_generator.check_token(user, token):
            user.is_active = True
            user.save()
            # Every activated account gets a default profile.
            createprofile = UserProfile()
            createprofile.name = '名無しの電大生'
            createprofile.email = user.email
            createprofile.save()
            return super(CreateCompleteView, self).get(request, **kwargs)
        else:
            raise Http404


# The views below wrap Django's (pre-2.0) function-based auth views with
# project templates and forms.

def password_reset(request):
    context = {
        'post_reset_redirect': reverse_lazy('accounts:password_reset_done'),
        'template_name': 'accounts/password_reset_form.html',
        'email_template_name': 'mailtemplate/password_reset/message.txt',
        'subject_template_name': 'mailtemplate/password_reset/subject.txt',
        'password_reset_form': ForgetPasswordForm,
    }
    return auth_views.password_reset(request, **context)


def password_reset_done(request):
    context = {
        'template_name': 'accounts/password_reset_done.html',
    }
    return auth_views.password_reset_done(request, **context)


def password_reset_confirm(request, uidb64, token):
    context = {
        'uidb64': uidb64,
        'token': token,
        'post_reset_redirect': reverse_lazy('accounts:password_reset_complete'),
        'template_name': 'accounts/password_reset_confirm.html',
        'set_password_form': PasswordConfirmForm,
    }
    return auth_views.password_reset_confirm(request, **context)


def password_reset_complete(request):
    context = {
        'template_name': 'accounts/password_reset_complete.html',
    }
    return auth_views.password_reset_complete(request, **context)


def login(request):
    context = {
        'template_name': 'accounts/login.html',
        'authentication_form': LoginForm
    }
    return auth_views.login(request, **context)


def logout(request):
    # NOTE(review): logout renders the login template — confirm intended.
    context = {
        'template_name': 'accounts/login.html'
    }
    return auth_views.logout(request, **context)
/tdu/app/migrations/0001_initial.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-05-20 00:08 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255, verbose_name='カテゴリ名')), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=255, verbose_name='曜日')), ('text', models.CharField(max_length=255, verbose_name='時間')), ('sub', models.CharField(max_length=255, verbose_name='科目名')), ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Category', verbose_name='カテゴリ')), ], ), ]
/tdu/app/migrations/0002_auto_20170708_0057.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-08 00:57 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('app', '0001_initial'), ] operations = [ migrations.AddField( model_name='post', name='when', field=models.CharField(default='SOME STRING', max_length=255, verbose_name='時期'), ), migrations.AlterField( model_name='post', name='category', field=models.CharField(default='SOME STRING', max_length=255, verbose_name='カテゴリ名'), ), migrations.DeleteModel( name='Category', ), ]
/tdu/app/models.py
from datetime import datetime

from django.db import models


class Post(models.Model):
    """One timetable entry: day, period, subject, category and term.

    Despite the generic name, the verbose_names show this models a class
    slot (day of week / time / subject), not a blog post.
    """

    title = models.CharField('曜日', max_length=255)
    text = models.CharField('時間', max_length=255)
    sub = models.CharField('科目名', max_length=255)
    category = models.CharField('カテゴリ名', max_length=255 ,default='SOME STRING')
    when = models.CharField('時期', max_length=255 ,default='SOME STRING')

    def __str__(self):
        return self.sub
/tdu/app/urls.py
from django.conf.urls import url

from . import views

# Routes: timetable list plus CSV import/export endpoints.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^csv_import/$', views.csv_import, name='csv_import'),
    url(r'^csv_export/$', views.csv_export, name='csv_export'),
]
/tdu/app/views.py
import csv
from io import TextIOWrapper, StringIO

from django.http import HttpResponse
from django.shortcuts import redirect
from django.views import generic

from .models import Post
from polls.models import Poll ,Choice


class IndexView(generic.ListView):
    """List all timetable Posts."""

    model = Post


def csv_import(request):
    """Import Posts from an uploaded CSV and seed a Poll + Choices per row.

    Expected columns: pk, title(day), text(time), sub(subject), category, when.
    """
    q_array = ['q1','q2','q3']
    form_data = TextIOWrapper(
        request.FILES['csv'].file, encoding='utf-8')
    # NOTE(review): a TextIOWrapper instance is always truthy, so this guard
    # never skips — confirm whether an empty-upload check was intended.
    if form_data:
        csv_file = csv.reader(form_data)
        for line in csv_file:
            post, _ = Post.objects.get_or_create(pk=line[0])
            post.title = line[1]
            post.text = line[2]
            post.sub = line[3]
            # NOTE(review): a fresh Poll and three Choice rows are created on
            # every import of every row — re-importing duplicates them.
            mypoll = Poll()
            mypoll.subname = line[3]
            mypoll.question1 = "課題の難易度 "
            mypoll.question2 = "テストの難易度 "
            mypoll.question3 = "課題の量 "
            for q in q_array:
                mychoice = Choice()
                mychoice.subname = line[3]
                mychoice.value = q
                mychoice.save()
            # category, _ = Category.objects.get_or_create(name=line[4])
            post.category = line[4]
            post.when = line[5]
            post.save()
            mypoll.save()
    return redirect('app:index')


def csv_export(request):
    """Stream every Post as a downloadable CSV attachment (db.csv)."""
    memory_file = StringIO()
    writer = csv.writer(memory_file)
    for post in Post.objects.all():
        row = [post.pk, post.title, post.text, post.sub, post.category,post.when]
        writer.writerow(row)
    response = HttpResponse(
        memory_file.getvalue(), content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=db.csv'
    return response
/tdu/home/views.py
from django.http import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect

from keijiban.models import Posting
from polls.models import Poll
from .models import Contact
from .forms import ContactForm


# Create your views here.
def show(request):
    """Render the home page: the latest threads (max 5) and a contact form.

    A "thread" is a poll whose subject name matches a posting; the list is
    ordered by the newest posting that mentions each subject.
    """
    posts_list = Posting.objects.order_by('-created_at')
    db_poll = Poll.objects.all()

    # Poll pk for every posting whose subject matches a poll, newest first.
    matched = [
        db_post.pk
        for post in posts_list
        for db_post in db_poll
        if post.subject == db_post.subname
    ]
    # First-occurrence de-duplication (dict preserves insertion order on
    # Python 3.7+), capped at five threads.  Replaces the previous
    # O(n^2) pop-and-restart loop and manual truncation loop; the
    # resulting list is identical.
    pk_list = list(dict.fromkeys(matched))[:5]

    # Contact form: bound on POST, saved and redirected when valid.
    form = ContactForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            return redirect('home:contact')

    contexts = {
        'posts_list': posts_list,
        'db_poll': db_poll,
        'pk_list': pk_list,
        'form':form,
    }
    return render(request,'home/home.html', contexts)


def contact(request):
    """Render the static contact page."""
    return render(request, 'home/contact.html')
/tdu/keijiban/admin.py
from django.contrib import admin

from keijiban.models import Posting
from keijiban.models import PostingSubject

# Register your models here.
# Both models use the default ModelAdmin.
admin.site.register(Posting)
admin.site.register(PostingSubject)
/tdu/keijiban/forms.py
from django import forms

from .models import Posting
from .models import PostingSubject


class PostingForm(forms.ModelForm):
    """Form for one bulletin-board posting (name, message, subject, pk link)."""

    name = forms.CharField(label="名前", required=True)
    message = forms.CharField(label="メッセージ", widget=forms.Textarea)

    class Meta:
        model = Posting
        fields = ('name','message','subject','pk_label')
        # widgets = {
        #     'name': forms.TextInput(attrs={'size': 40}),
        #     'message': forms.Textarea(attrs={'cols': 80, 'rows': 20})
        # }

    def __init__(self, *args,**kwargs):
        """Attach Bootstrap classes and placeholders to the widgets."""
        super().__init__(*args, **kwargs)
        self.fields['name'].widget.attrs['class'] = 'form-control'
        self.fields['name'].widget.attrs['placeholder'] = '名前'
        self.fields['message'].widget.attrs['class'] = 'form-control'
        self.fields['message'].widget.attrs['placeholder'] = 'メッセージ'


class PostingSubjectForm(forms.ModelForm):
    """Form for creating a board subject (course name)."""

    class Meta:
        model = PostingSubject
        fields = ('subject',)
        widgets = {
            'subject': forms.TextInput(attrs={'size': 40})
        }

    def __init__(self, *args,**kwargs):
        """Attach Bootstrap class and placeholder to the subject widget."""
        super().__init__(*args, **kwargs)
        self.fields['subject'].widget.attrs['class'] = 'form-control'
        self.fields['subject'].widget.attrs['placeholder'] = '教科'
/tdu/keijiban/models.py
from django.db import models


class Posting(models.Model):
    """One bulletin-board post, tagged with a subject (course) name."""

    name = models.CharField(
        max_length=64,
        verbose_name='名前',
        help_text='あなたの名前を入力してください',
    )
    message = models.TextField(
        verbose_name='メッセージ',
        help_text='メッセージを入力してください',
        null=True,
    )
    subject = models.CharField(
        max_length=64,
        verbose_name='科目名',
        null=True,
    )
    created_at = models.DateTimeField(
        auto_now_add=True,
        verbose_name='登録日時',
    )
    # Link to the author's UserProfile pk; -1 marks an anonymous posting
    # (see keijiban.views.index, which fills this field in the form).
    pk_label = models.IntegerField(
        null=True,
    )


class PostingSubject(models.Model):
    """A board subject (course name) that postings can be filed under."""

    subject = models.CharField(
        max_length=64,
        verbose_name='科目名',
        null=True,
    )
/tdu/keijiban/urls.py
from django.conf.urls import include, url

from . import views

# Routes: board overview plus one thread page per poll pk.
urlpatterns = [
    url(r'^$', views.post_list, name='list'),
    url(r'^index/(?P<pk>[0-9]+)/$', views.index, name='index'),
]
/tdu/keijiban/views.py
# Paginator
from django.core.paginator import (
    Paginator,        # the paginator class itself
    EmptyPage,        # raised when the page number is out of range
    PageNotAnInteger  # raised when the page number is not an integer
)
from django.shortcuts import (
    render,
    redirect,
)

from .models import Posting
from .forms import PostingForm
from .models import PostingSubject
from .forms import PostingSubjectForm
from django.contrib import messages
from django.shortcuts import render, get_object_or_404
from polls.models import Poll
from profiles.models import UserProfile
from django.contrib.auth.models import User


def post_list(request):
    """List every poll as a board entry."""
    posts = Poll.objects.all()
    return render(request, 'keijiban/post_list.html', {'posts': posts})


def _get_page(list_, page_no, count=100):
    """Return the page of *list_* selected by *page_no* via the paginator."""
    paginator = Paginator(list_, count)
    try:
        page = paginator.page(page_no)
    except (EmptyPage, PageNotAnInteger):
        # If page_no is missing, not a number, or out of range,
        # fall back to the first page.
        page = paginator.page(1)
    return page


def index(request,pk):
    """Display a thread and handle new postings for the poll *pk*."""
    posts = get_object_or_404(Poll, pk=pk)
    # Pre-fill the form with the subject name and the poster's name.
    if not request.user.is_authenticated():
        # Anonymous visitors post as "@名無しの電大生" with pk_label -1.
        form = PostingForm(initial={'subject':posts.subname , 'name':"@名無しの電大生", 'pk_label':-1})
    else:
        # Logged-in users post under their profile name.
        email = request.user.email
        info_personal = UserProfile.objects.get(email = email)
        # Attach a link to the poster's profile.
        # NOTE(review): if no profile matches the e-mail, pk_link stays
        # unbound and the next line raises NameError — confirm a profile
        # always exists for authenticated users.
        link_profile = UserProfile.objects.all()
        for tmp in link_profile:
            if tmp.email == email:
                pk_link = tmp.pk
        form = PostingForm(initial={'subject':posts.subname , 'name':info_personal.name, 'pk_label':pk_link})

    if request.method == 'POST':
        # ModelForm and Form are instantiated the same way here.
        form = PostingForm(request.POST or None)
        if form.is_valid():
            # save() alone persists the posting through the Model.
            form.save()
            # Notify the user of success via the messages framework.
            messages.success(request, '投稿を受付ました。')
            return redirect('keijiban:index',pk=pk)
        else:
            # Notify the user of failure via the messages framework.
            messages.error(request, '入力内容に誤りがあります。')

    # Build a list containing only the postings for this lecture.
    db_posts = Posting.objects.order_by('-subject')
    post_list = ["temp"]
    for temp in db_posts:
        if temp.subject == posts.subname:
            post_list.append(temp)
    # Drop the placeholder and show oldest first.
    post_list.pop(0)
    post_list.reverse()
    page = _get_page(
        # Posting.objects.order_by('-id'),  # newest postings first
        post_list,
        request.GET.get('page')  # page number from the GET query
    )
    contexts = {
        'page': page,
        'posts': posts,
        'form': form,
    }
    return render(request, 'keijiban/index.html', contexts)
/tdu/polls/admin.py
from django.contrib import admin from polls.models import Poll , Choice admin.site.register(Poll) admin.site.register(Choice)
/tdu/polls/models.py
# coding: UTF-8 from django.db import models # # アンケート質問モデル # class Poll(models.Model): subname = models.CharField(max_length=200) question1 = models.CharField(max_length=200) question2 = models.CharField(max_length=200) question3 = models.CharField(max_length=200) def __str__(self): return self.subname # # アンケート選択モデル # class Choice(models.Model): subname = models.CharField(max_length=200, default='SOME STRING') value = models.CharField(max_length=200 , default='SOME STRING') easy = models.IntegerField(default=0) normal = models.IntegerField(default=0) hard = models.IntegerField(default=0) def __str__(self): return self.subname
/tdu/polls/urls.py
# coding: UTF-8 from django.conf.urls import url from polls import views urlpatterns = [ url(r'^$', views.poll_list, name = 'poll_list'), url(r'^poll/(?P<pk>[0-9]+)/$', views.poll_detail, name = 'poll_detail'), # ex: /polls/5/ # ex: /polls/5/results/ url(r'^vote/$', views.vote, name='vote'), # ex: /polls/5/vote/ #url(r'^result/$',views.result,name='result'), ]
/tdu/polls/views.py
# coding: UTF-8 from django.http import HttpResponseRedirect, HttpResponse from django.shortcuts import render, get_object_or_404 from django.core.urlresolvers import reverse from django.views import generic from polls.models import Poll, Choice # # 一覧表示 # def poll_list(request): posts = Poll.objects.all() return render(request, 'poll_list/poll_list.html', {'posts': posts}) def poll_detail(request, pk): post = get_object_or_404(Poll, pk=pk) return render(request, 'poll_list/poll_detail.html', {'post': post}) # 投票 # def vote(request): name = request.POST["subname"] choice = Choice.objects.filter(subname = name) q1 = request.POST["select1"] q2 = request.POST["select2"] q3 = request.POST["select3"] Q1 = choice[0] Q2 = choice[1] Q3 = choice[2] if q1 == "e1" : num = Q1.easy Q1.easy = num+1 Q1.save() elif q1 == "n1" : num = Q1.normal Q1.normal = num+1 Q1.save() elif q1 == "h1" : num = Q1.hard Q1.hard = num+1 Q1.save() if q2 == "e2" : num = Q2.easy Q2.easy = num+1 Q2.save() elif q2 == "n2" : num = Q2.normal Q2.normal = num+1 Q2.save() elif q2 == "h2" : num = Q2.hard Q2.hard = num+1 Q2.save() if q3 == "e3" : num = Q3.easy Q3.easy = num+1 Q3.save() elif q3 == "n3" : num = Q3.normal Q3.normal = num+1 Q3.save() elif q3 == "h3" : num = Q3.hard Q3.hard = num+1 Q3.save() return render(request, 'poll_list/poll_result.html', {'Q1' :Q1,'Q2': Q2,'Q3' : Q3 })
/tdu/profiles/admin.py
from django.contrib import admin from .models import UserProfile #UserProfileモデルをインポート admin.site.register(UserProfile) #モデルをadminページで見るにはこれで登録
/tdu/profiles/forms.py
from django import forms from .models import UserProfile GRADE_CHOICES = ( ('1年', '1年生'), ('2年', '2年生'), ('3年', '3年生'), ('4年', '4年生'), ('院1年', '院1年生'), ('院2年', '院2年生'), ('教員', '教員'), ) MAJOR_CHOICES = ( ('RB', 'RB'), ('RD', 'RD'), ('RG', 'RG'), ('RT', 'RT'), ('RU', 'RT'), ) class UserProfileForm(forms.ModelForm): name = forms.CharField(label="名前", required=True) text = forms.CharField(label="コメント", widget=forms.Textarea) class Meta: model = UserProfile fields = ('name', 'grade', 'major', 'text') grade = forms.ChoiceField( label='学年', widget=forms.Select, choices=GRADE_CHOICES, required=False, ) major = forms.ChoiceField( label='学系', widget=forms.Select, choices=MAJOR_CHOICES, required=False, ) def __init__(self, *args,**kwargs): super().__init__(*args, **kwargs) self.fields['name'].widget.attrs['class'] = 'form-control' self.fields['name'].widget.attrs['placeholder'] = '名前' self.fields['text'].widget.attrs['class'] = 'form-control' self.fields['text'].widget.attrs['placeholder'] = 'コメント'
/tdu/profiles/migrations/0003_auto_20170703_1040.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-03 10:40 from __future__ import unicode_literals from django.db import migrations import encrypted_fields.fields class Migration(migrations.Migration): dependencies = [ ('profiles', '0002_userprofile_email'), ] operations = [ migrations.AlterField( model_name='userprofile', name='email', field=encrypted_fields.fields.EncryptedEmailField(default='example@example.com', max_length=254), ), migrations.AlterField( model_name='userprofile', name='grade', field=encrypted_fields.fields.EncryptedCharField(max_length=5), ), migrations.AlterField( model_name='userprofile', name='major', field=encrypted_fields.fields.EncryptedCharField(max_length=5), ), migrations.AlterField( model_name='userprofile', name='name', field=encrypted_fields.fields.EncryptedCharField(max_length=20), ), migrations.AlterField( model_name='userprofile', name='text', field=encrypted_fields.fields.EncryptedTextField(), ), ]
/tdu/profiles/migrations/0004_auto_20170703_1043.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-03 10:43 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('profiles', '0003_auto_20170703_1040'), ] operations = [ migrations.AlterField( model_name='userprofile', name='email', field=models.EmailField(default='example@example.com', max_length=254), ), ]
/tdu/profiles/migrations/0005_auto_20170703_1142.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-03 11:42 from __future__ import unicode_literals from django.db import migrations import encrypted_fields.fields class Migration(migrations.Migration): dependencies = [ ('profiles', '0004_auto_20170703_1043'), ] operations = [ migrations.AlterField( model_name='userprofile', name='grade', field=encrypted_fields.fields.EncryptedCharField(max_length=254), ), migrations.AlterField( model_name='userprofile', name='major', field=encrypted_fields.fields.EncryptedCharField(max_length=254), ), ]
/tdu/profiles/migrations/0006_auto_20170703_2051.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-03 20:51 from __future__ import unicode_literals from django.db import migrations import encrypted_fields.fields class Migration(migrations.Migration): dependencies = [ ('profiles', '0005_auto_20170703_1142'), ] operations = [ migrations.AlterField( model_name='userprofile', name='grade', field=encrypted_fields.fields.EncryptedIntegerField(), ), ]
/tdu/profiles/migrations/0007_auto_20170703_2053.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-03 20:53 from __future__ import unicode_literals from django.db import migrations, models import encrypted_fields.fields class Migration(migrations.Migration): dependencies = [ ('profiles', '0006_auto_20170703_2051'), ] operations = [ migrations.AlterField( model_name='userprofile', name='grade', field=models.CharField(max_length=254), ), migrations.AlterField( model_name='userprofile', name='name', field=encrypted_fields.fields.EncryptedCharField(max_length=254), ), ]
/tdu/profiles/models.py
from django.db import models from django.utils import timezone from encrypted_fields import EncryptedTextField ,EncryptedEmailField,EncryptedCharField,EncryptedIntegerField class UserProfile(models.Model): name = EncryptedCharField(max_length = 254) email = models.EmailField(max_length= 254 , default = 'example@example.com') grade = models.CharField(max_length = 254) major = EncryptedCharField(max_length = 254) text = EncryptedTextField() # name = models.CharField(max_length = 20) # email = models.EmailField(max_length = 254,default='example@me.com') # grade = models.CharField(max_length = 5) # major = models.CharField(max_length = 5) # text = models.TextField() def publish(self): self.save() def __str__(self): return self.name
/tdu/profiles/urls.py
#profilesの全てのviewをインポートするよ from django.conf.urls import include, url from . import views urlpatterns = [ url(r'^detail/(?P<pk>[0-9]+)/$', views.profile_detail, name = 'profile_detail'), url(r'^edit/$', views.profile_edit, name='profile_edit'), url(r'^mydetail/$', views.profile_mydetail, name = 'profile_mydetail'), ]
/tdu/profiles/views.py
from django.shortcuts import render from .models import UserProfile from django.shortcuts import render, get_object_or_404 from .forms import UserProfileForm from django.shortcuts import redirect from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.http import HttpResponse def profile_detail(request, pk): post = get_object_or_404(UserProfile, pk=pk) #return HttpResponse(request.user.email) return render(request, 'profiles/profile_detail.html', {'post': post}) @login_required def profile_mydetail(request): email = request.user.email post = UserProfile.objects.get(email = email) return render(request, 'profiles/profile_mydetail.html', {'post': post}) @login_required def profile_edit(request): email = request.user.email post = UserProfile.objects.get(email = email) if request.method == "POST": #form = UserProfileForm(request.POST, instance=post) post.name = request.POST["name"] post.text = request.POST["text"] post.major = request.POST["major"] post.grade = request.POST["grade"] post.save() # if form.is_valid(): # post = form.save(commit=False) # post.save() # return redirect('profile_mydetail') return redirect('profile_mydetail') else: form = UserProfileForm(instance=post) return render(request, 'profiles/profile_edit.html', {'form': form})
/tdu/timetable/admin.py
from django.contrib import admin from .models import Timetable1,Timetable2 admin.site.register(Timetable1) admin.site.register(Timetable2)
/tdu/timetable/migrations/0001_initial.py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-07-08 00:57 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Timetable1', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('username', models.CharField(max_length=255, verbose_name='名前')), ('day', models.CharField(max_length=255, verbose_name='曜日')), ('time', models.CharField(max_length=255, verbose_name='時間')), ('sub', models.CharField(max_length=255, verbose_name='科目名')), ('when', models.CharField(max_length=255, verbose_name='時期')), ], ), migrations.CreateModel( name='Timetable2', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('username', models.CharField(max_length=255, verbose_name='名前')), ('day', models.CharField(max_length=255, verbose_name='曜日')), ('time', models.CharField(max_length=255, verbose_name='時間')), ('sub', models.CharField(max_length=255, verbose_name='科目名')), ('when', models.CharField(max_length=255, verbose_name='時期')), ], ), ]
/tdu/timetable/models.py
from datetime import datetime from django.db import models class Timetable1(models.Model): username = models.CharField('名前', max_length=255) day = models.CharField('曜日', max_length=255) time = models.CharField('時間', max_length=255) sub = models.CharField('科目名', max_length=255) when = models.CharField('時期', max_length=255) def __str__(self): return self.username class Timetable2(models.Model): username = models.CharField('名前', max_length=255) day = models.CharField('曜日', max_length=255) time = models.CharField('時間', max_length=255) sub = models.CharField('科目名', max_length=255) when = models.CharField('時期', max_length=255) def __str__(self): return self.username
/tdu/timetable/urls.py
from django.conf.urls import url from .import views urlpatterns = [ url(r'^$', views.time_table, name = 'table'), #追加 7/9 山田 url(r'^timeedit/$', views.time_table2, name='edit'), url(r'^result/$', views.show, name='result'), ]
/tdu/timetable/views.py
from django.shortcuts import render
from app.models import Post
from .models import Timetable1, Timetable2
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required

# Create your views here.


def time_table(request):
    """Render the timetable entry page with every Post available."""
    posts = Post.objects.all()
    #return HttpResponse(request.user.email)
    return render(request, 'timetable/timetable.html', {'posts': posts})


@login_required
def time_table2(request):
    """Save the submitted first- and second-semester timetable cells.

    Each form field is named after its slot (e.g. "月1", "後期火3"); a
    value of 'null' means "no course selected" and is not saved.
    The user is identified by e-mail, stored in ``username``.
    """
    username = request.user.email
    # NOTE(review): ``subject`` is read but never used below — confirm
    # whether the "timetable" POST field is still needed.
    subject = request.POST["timetable"]
    # listnames = ["月1","月2","月3","月4","月5","火]
    listnames = list()
    when = ["後期"]
    days = ["月", "火", "水", "木", "金"]
    times = ["1", "2", "3", "4", "5"]
    tuti = ["土1", "土2", "土3", "土4"]
    # Build the list of first-semester form field names to look up.
    for day in days:
        for time in times:
            data = day + time
            listnames.append(data)
    for element in tuti:
        listnames.append(element)
    # Using listnames, persist the data sent via request.POST.
    # Nothing is saved when no subject was selected ('null').
    # First, look up the timetable rows already saved for this user.
    user_timetable1 = Timetable1.objects.filter(username = username)
    for day2 in listnames:
        week = day2[0]   # weekday character
        num = day2[1]    # period digit
        mytime = Timetable1()
        if user_timetable1.count() == 0:
            # No saved timetable yet: insert every selected cell.
            t1 = request.POST[day2]
            # NOTE(review): this comparison is always True — likely a
            # leftover from an earlier condition.
            if t1 == request.POST[day2]:
                if t1 != 'null':
                    mytime.username = username
                    mytime.day = day2[0]
                    mytime.time = day2[1]
                    mytime.sub = t1
                    mytime.when = "前期"
                    mytime.save()
        else:
            # Timetable exists: replace any matching cell, else insert.
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:
                if t1 != 'null':
                    # NOTE(review): ``mytime`` is saved once per existing
                    # row here; after the first save the later calls
                    # update the same row (same pk), so the net effect is
                    # one row — confirm this is intended.
                    for timetable in user_timetable1:
                        if timetable.day == week and timetable.time == num:
                            print(timetable.sub)
                            timetable.delete()
                            mytime.username = username
                            mytime.day = day2[0]
                            mytime.time = day2[1]
                            mytime.sub = t1
                            mytime.when = "前期"
                            mytime.save()
                        else:
                            mytime.username = username
                            mytime.day = day2[0]
                            mytime.time = day2[1]
                            mytime.sub = t1
                            mytime.when = "前期"
                            mytime.save()
    # Second semester: field names are prefixed with "後期".
    listnames2 = list()
    tuti2 = ["後期土1", "後期土2", "後期土3"]
    for day in days:
        for time in times:
            data = when[0] + day + time
            listnames2.append(data)
    for elemnt2 in tuti2:
        listnames2.append(elemnt2)
    user_timetable2 = Timetable2.objects.filter(username=username)
    for day2 in listnames2:
        week = day2[2]   # weekday character (after the "後期" prefix)
        num = day2[3]    # period digit
        mytime = Timetable2()
        if user_timetable2.count() == 0:
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:
                if t1 != 'null':
                    mytime.username = username
                    mytime.day = day2[2]
                    mytime.time = day2[3]
                    mytime.sub = t1
                    mytime.when = "後期"
                    mytime.save()
        else:
            t1 = request.POST[day2]
            if t1 == request.POST[day2]:
                if t1 != 'null':
                    # Same replace-or-insert quirk as the first-semester
                    # loop above (minus the debug print).
                    for timetable in user_timetable2:
                        if timetable.day == week and timetable.time == num:
                            timetable.delete()
                            mytime.username = username
                            mytime.day = day2[2]
                            mytime.time = day2[3]
                            mytime.sub = t1
                            mytime.when = "後期"
                            mytime.save()
                        else:
                            mytime.username = username
                            mytime.day = day2[2]
                            mytime.time = day2[3]
                            mytime.sub = t1
                            mytime.when = "後期"
                            mytime.save()
    return HttpResponseRedirect('/timetable/result')


def show(request):
    """Render both semesters' saved timetable rows for the current user."""
    username = request.user.email
    post1 = Timetable1.objects.filter(username = username)
    post2 = Timetable2.objects.filter(username = username)
    return render(request, 'timetable/result.html', {'post1': post1, 'post2': post2})
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
shawntan/predict-forum-pgm
refs/heads/master
{"/lib/evaluation/evaluate.py": ["/lib/io/reader.py", "/lib/evaluation/pairwise.py", "/lib/interfaces/model_utils.py", "/lib/evaluation/sliding_window.py", "/lib/io/reporting.py"], "/lib/evaluation/evaluate_window.py": ["/lib/io/reader.py", "/lib/interfaces/model_utils.py", "/lib/io/reporting.py"], "/lib/interfaces/extractor_utils.py": ["/lib/io/reader.py", "/lib/io/util.py", "/lib/io/reporting.py"], "/lib/io/util.py": ["/lib/io/reader.py"], "/predictor.py": ["/lib/io/reader.py", "/hist_to_probdist.py"], "/lib/training/trainer_test.py": ["/lib/evaluation/evaluate_window.py", "/lib/evaluation/evaluate.py"], "/lib/interfaces/generic_model.py": ["/lib/interfaces/model_utils.py"], "/lib/evaluation/play.py": ["/lib/interfaces/model_utils.py", "/lib/io/util.py", "/lib/io/reporting.py"], "/lib/interfaces/model_utils.py": ["/lib/io/reporting.py"]}
└── ├── generate_plots ├── hist_to_probdist.py ├── learn_topics ├── lib │ ├── evaluation │ │ ├── analyse_bins.py │ │ ├── evaluate.py │ │ ├── evaluate_window.py │ │ ├── pairwise.py │ │ ├── play.py │ │ └── sliding_window.py │ ├── graphs.py │ ├── interfaces │ │ ├── extractor_utils.py │ │ ├── generic_model.py │ │ └── model_utils.py │ ├── io │ │ ├── __init__.py │ │ ├── dataset.py │ │ ├── pickled_globals.py │ │ ├── reader.py │ │ ├── reporting.py │ │ ├── util.py │ │ └── writer.py │ ├── options │ │ ├── __init__.py │ │ ├── config.py │ │ └── options.py │ └── training │ ├── test.py │ ├── trainer.py │ └── trainer_test.py ├── preamble.py ├── predictor.py └── print_topics
/generate_plots
#!/bin/python2 import cPickle as pickle import lib.io.pickled_globals import lib.graphs as graphs for i in range(1,10): hist = pickle.load(open('graphs/histograms/w%d_histograms'%(i+1),'rb')) graphs.plot_hist(20,hist, upper = 50,directory='hist_%d_topics'%(i+1))
/hist_to_probdist.py
#!/usr/bin/python2 import os import numpy as np import cPickle as pickle from collections import defaultdict import lib.io.pickled_globals import lib.graphs as graphs def main(): directory = 'graphs/prob_dist' if not os.path.exists(directory): os.makedirs(directory) for i in range(1,10): hist = pickle.load(open('graphs/histograms/w%d_histograms'%(i+1),'rb')) model = [] topic_dist = [] for topic_hist in hist: total = sum(topic_hist[i] for i in topic_hist) prob_dist = defaultdict(float,((i,topic_hist[i]/float(total)) for i in topic_hist)) model.append(prob_dist) topic_dist.append(total) print prob_dist topic_dist = np.array(topic_dist)/float(sum(topic_dist)) pickle.dump(topic_dist,open('graphs/prob_dist/dist_t%03d_prior'%i,'wb')) pickle.dump(model,open('graphs/prob_dist/dist_t%03d'%i,'wb')) def time_dist(topic_dist,prior,model,limit = 24*3*2): t_dist = np.zeros(limit) for i in range(limit): t_dist[i] = sum( model[t][i] * topic_dist[t] * prior[t] for t in range(len(topic_dist))) t_dist = t_dist/sum(t_dist) return t_dist if __name__ == '__main__': main() model = pickle.load(open('graphs/prob_dist/dist_t%03d'%9,'rb')) #print model print time_dist([0.1 for i in range(9)],model)
/learn_topics
#!/usr/bin/python from algo.lda import LDASampler from lib.io.reader import windowed,filter_tokenise import pickle import sys from collections import defaultdict window_size_max = int(sys.argv[1]) num_topics_max = int(sys.argv[2]) documents = ["data/%s"%i.strip() for i in open(sys.argv[3])] output = sys.argv[-1] window_size = 15 print "Loading file" docs = [' '.join(w[2]) for w,_ in windowed(documents,window_size)] print "Tokenising documents." tokenised_docs = [filter_tokenise(i) for i in docs] for num_topics in range(num_topics_max,num_topics_max+1): print "Window size = %d, Topics = %d"%(window_size,num_topics) lda = LDASampler( docs=tokenised_docs, num_topics=num_topics, alpha=0.25, beta=0.25) print 'Sampling...' for _ in range(100): # zs = lda.assignments # print zs # print '[%i %i] [%i %i]' % (zs[0][3], zs[1][3], zs[2][3], zs[3][3]) lda.next() print print 'words ordered by probability for each topic:' tks = lda.topic_keys() for i, tk in enumerate(tks): print '%3d'%i , tk[:10] print '%3s'%'', tk[10:20] print '%3s'%'', tk[20:30] print print 'document keys:' dks = lda.doc_keys() #print 'topic assigned to each word of first document in the final iteration:' size = 20 time_differences = [dt for _,dt in windowed(documents,window_size)] bin_list = [] for i in range(num_topics): bins = defaultdict(float) bin_list.append(bins) for dt, doc, dk in zip(time_differences, docs, dks): print '%5d'%dt + '\t'+\ doc[:40] +"..." + '\t' +\ str(dk) for p,i in dk: bin = int(float(dt)/size) bin_list[i][bin] += p print lda.doc_distribution(lda.docs[0]) print lda.doc_distribution(lda.docs[1]) print lda.doc_distribution(lda.docs[2]) print lda.doc_distribution(lda.docs[3]) print lda.doc_distribution(lda.docs[-4]) print lda.doc_distribution(lda.docs[-3]) print lda.doc_distribution(lda.docs[-2]) print lda.doc_distribution(lda.docs[-1]) FILE = open("w%d_t%d_%s"%(window_size,num_topics,output),'wb') pickle.dump(lda,FILE) FILE.close() #graphing. 
""" for i in range(num_topics): bins = defaultdict(float) bin_list.append(bins) #plot_hist(size,bin_list) """
/lib/evaluation/analyse_bins.py
# Summarise the time-bin shelves and plot the transition density matrix
# p(q_{t+1} | q_t) (Python 2 script).
# Usage: analyse_bins.py <K> <output_file> <transit_file>
import sys, operator
import shelve
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import bsddb3
from collections import defaultdict

K = int(sys.argv[1])          # how many top entries to report per bin
output_file = sys.argv[2]
transit_file = sys.argv[3]    # NOTE(review): only used by the disabled block below
bins = shelve.BsdDbShelf(bsddb3.hashopen('bins.data', 'r'))
#bins = shelve.open('bins.data','r')
out = open(output_file, 'w')
# Shelf keys are stringified ints; sort them numerically.
keys = [int(key) for key in bins]
keys.sort()
for key in keys:
    key = str(key)
    print "Evaluating ", key, " ..."
    # Top-K entries of this bin, normalised to fractions of the K total.
    sorted_top = sorted(
        bins[key].iteritems(),
        key=operator.itemgetter(1),
        reverse = True)[:K]
    total = sum(v for _, v in sorted_top)
    sorted_top = map(lambda tup: (tup[0], float(tup[1]) / total), sorted_top)
    # Bin label in minutes (20-minute blocks), then names, then fractions.
    out.write('%10d\t' % (20 * int(key)))
    out.write('\t'.join('%10s' % i for i, _ in sorted_top) + '\n')
    out.write('%10s\t' % "")
    out.write('\t'.join('%10.5f' % i for _, i in sorted_top) + '\n')
out.close()
bins.close()
states = set()
#time_trans = shelve.open('trans_bins.data','r')
time_trans = shelve.BsdDbShelf(bsddb3.hashopen('trans_bins.data', 'r'))
state_total = defaultdict(int)
transited_to = set()
transited_from = set()
# Keys are "<from>-<to>" state pairs; collect both state sets.
for key in time_trans:
    p, n = [int(i) for i in key.split('-')]
    transited_to.add(n)
    transited_from.add(p)
transited_to = sorted(list(transited_to))
transited_from = sorted(list(transited_from))
# Row totals, used to normalise each row into a conditional distribution.
for i in transited_from:
    state_total[i] = sum(time_trans.get("%d-%d" % (i, j), 0) for j in transited_to)
"""
out=open(transit_file,'w')
out.write('\t'.join("%5s"%j for j in transited_to)+ '\n')
for i in transited_from:
    out.write('\t'.join("%5.4f"%(time_trans.get("%d-%d"%(i,j),0)/float(state_total[i]))for j in transited_to)+ '\n')
out.close()
"""


def pdensity(dimI, dimJ):
    # Build the dense dimI x dimJ matrix of p(q_{t+1}=j | q_t=i).
    print "Creating sparse matrix %d,%d" % (dimI, dimJ)
    #pd = lil_matrix((dimI,dimJ),dtype=np.float32)
    pd = np.zeros((dimI, dimJ), dtype=np.float32)
    for key in time_trans:
        i, j = [int(i) for i in key.split('-')]
        if i > dimI or j > dimJ:
            continue
        pd[i - 1, j - 1] = time_trans[key] / float(state_total[i])
    return pd


# make these smaller to increase the resolution
#x = arange(0, transited_from[-1], 1)
#y = arange(0, transited_to[-1], 1)
print "Constructing density matrix..."
#Z = pdensity(transited_from[-1], transited_to[-1])
Z = pdensity(100, 100)
fig = plt.figure()
#plt.imshow(Z.toarray(),cmap=cm.Greys)
im = plt.imshow(Z, cmap=cm.Greys, interpolation='nearest')
#im.set_interpolation('bicubic')
#ax.set_image_extent(-3, 3, -3, 3)
#plt.axis([0,200*20, 0, 200*20])
#fig.savefig('collated/%s'%output)
plt.title("Density matrix plot of $p(q_{t+1}|q_t)$")
plt.xlabel("$q_{t+1}$ (20 minute blocks)")
plt.ylabel("$q_{t}$ (20 minute blocks)")
plt.show()
/lib/evaluation/evaluate.py
#!/usr/bin/python2 from lib.io.reader import windowed from lib.io.reporting import reporting_init,timestamp_log from lib.io.util import * from lib.options import * from lib.interfaces.model_utils import unpickle_model from lib.evaluation.sliding_window import SlidingWindow from lib.evaluation.pairwise import PairwiseScoring def evaluate(threadfile, model, extractor, window_size = 1, bandwidth = 1000000, LAG_TIME = 10, offset = 0, sliding_window_size = 120, verbose = False ): posts_log, visit_log, result_log_tscore,result_log_window = timestamp_log( 'posts', 'visit', 't_score', 'sliding_window') try: time = 0 d_visit = LAG_TIME time_visit = time time_visit += d_visit post_buffer = [] t_score_cum = 0 count = 0 visits = 0 correct_count,wrong_count = 0,0 w = SlidingWindow(K = 20, alpha = 0.5) ps = PairwiseScoring() for window,d_t in windowed([threadfile],window_size, offset): #post being made if verbose: print "%d\t-->"%time posts_log.write("%d\n"%time) w.event('post',time) ps.event('post',time) assert(time_visit - time > 0) t_score_cum += time_visit-time count += 1 time_post = time + d_t post_buffer.append((extractor.extract(window),d_t)) last_post_time = time while time_visit <= time_post: #visit being made time = time_visit if verbose: print "%d\t<--"%time visits += 1 visit_log.write("%d\n"%time) w.event('visit',time) ps.event('visit',time) #start correction d_visit = None if post_buffer: feature_vec,_ = post_buffer[-1] d_visit = model.predict( feature_vec,d_t, current_d_t = time - last_post_time, unseen = post_buffer[:-1] ) if post_buffer: post_buffer = [] time_visit = last_post_time + d_visit assert(time < time_visit) #end correction time = time_post Pr_miss, Pr_fa, Pr_error = w.pr_error() result_log_window.write(str(Pr_miss) + ' , ' + str(Pr_fa) + '\n') model.add_experiment('prerror_test',threadfile,Pr_error) model.add_experiment('pairwise_scoring',threadfile,ps.score()) t_score = t_score_cum/float(count) result_log_tscore.write(str(t_score)+'\n') 
model.add_experiment('t-score_test',threadfile,t_score) #save_model(pickle_file,model) model.save() return { 'T-score': t_score, 'Pr_error': (Pr_miss,Pr_fa,Pr_error), 'Visits': visits, 'Posts': count, 'Pairwise': ps.score() #'Invalid Predictions': (correct_count+wrong_count, # wrong_count/float(correct_count+wrong_count)) } except Exception: raise finally: posts_log.close() visit_log.close() result_log_tscore.close() result_log_window.close() if __name__ == "__main__": o,args = read_options() reporting_init(o,"reports") extractor = load_from_file(o.extractor_name, "Extractor") model = load_from_file(o.model_name,"Model",o) if o.pickled_model: pickle_file = o.pickled_model model = unpickle_model(open(pickle_file,'rb')) result = evaluate( o.test_file, model, extractor, o.window_size, verbose = o.verbose ) #print result #for i,j in windowed(["thread"],1):print j
/lib/evaluation/evaluate_window.py
#!/usr/bin/python2 from lib.io.reader import windowed from lib.io.reporting import reporting_init,timestamp_log from lib.io.util import * from lib.options import * from lib.interfaces.model_utils import unpickle_model def evaluate(threadfile, model, extractor, window_size = 1, bandwidth = 1000000, LAG_TIME = 10, offset=0): posts_log, visit_log, result_log = timestamp_log( 'posts', 'visit', 'sliding_window') try: time = 0 d_visit = LAG_TIME time_visit = time time_visit += d_visit post_buffer = [] visits = 0 visit_times = [] posts_times = [] for window,d_t in windowed([threadfile],window_size,offset): #post being made print "%d\t-->"%time posts_log.write("%d\n"%time) posts_times.append(time) assert(time_visit - time > 0) time_post = time + d_t post_buffer.append(window) last_post_time = time while time_visit <= time_post: #visit being made time = time_visit print "%d\t<--"%time visits += 1 visit_log.write("%d\n"%time) visit_times.append(time) if post_buffer: feature_vec = extractor.extract(post_buffer[-1]) d_visit = model.predict(feature_vec,d_t) post_buffer = [] else: d_visit = model.repredict() p_from_last_post = last_post_time + d_visit if time < p_from_last_post: time_visit = p_from_last_post else: d_visit = model.repredict() time_visit = time + d_visit time = time_post k = 120 N = int(max(visit_times[-1],posts_times[-1])) sum_Phi = 0 sum_Psi = 0 sum_ref = 0 for i in range(N-k): r = len([j for j in posts_times if j >= i and j < i + k ]) h = len([j for j in visit_times if j >= i and j < i + k ]) if r > 0: sum_ref += 1 if r > h: sum_Phi += 1 elif r < h: sum_Psi += 1 Pr_miss = float(sum_Phi)/sum_ref Pr_fa = float(sum_Psi)/float(N-k) Pr_error = 0.5*Pr_miss + 0.5*Pr_fa result_log.write(str(Pr_miss) + ' , ' + str(Pr_fa) + '\n') model.add_experiment('prerror_test',threadfile,Pr_error) model.save() return Pr_error,visits except Exception: raise finally: posts_log.close() visit_log.close() result_log.close() eval_file = None model_name = None extr_name = None class 
Extractor: def extract(self,window): return window[0] if __name__ == "__main__": o,args = read_options() reporting_init(o,"reports") extractor = load_from_file(o['extractor_name'], "Extractor") model = load_from_file(o['model_name'],"Model",o) if o.has_key('pickled_model'): pickle_file = o['pickled_model'] model = unpickle_model(open(pickle_file,'rb')) result = evaluate( o['test_file'], model, extractor, pickle_file, o['window_size'] ) print result #for i,j in windowed(["thread"],1):print j
/lib/evaluation/pairwise.py
import math class PairwiseScoring(): def __init__(self,scoring = { ('visit','visit') : lambda e1,e2: math.exp(0.01*(e1-e2)), ('post', 'visit') : lambda e1,e2: 1-math.exp(0.01*(e1-e2)), ('post', 'post' ) : lambda e1,e2: 0 , ('visit','post' ) : lambda e1,e2: 0}): self.total_score = 0 self.count = 0 self.prev_event = (None,0) self.scoring = scoring def event(self,event_type,time): if self.prev_event[0]: et1,et2 = self.prev_event[0],event_type t1,t2 = self.prev_event[1],time score = self.scoring[et1,et2](float(t1),float(t2)) #print "%10s\t%10s\t%10d\t%10d\t%10.10f"%(et1,et2,t1,t2,score) if score > 0 : self.count += 1 self.total_score += score self.prev_event = (event_type,time) def score(self): return self.total_score/self.count if __name__ == "__main__": k = 10 posts = [(t*10 ,'post') for t in range(10)] +\ [(t*10 ,'post') for t in range(30,40)] visit = [(t+13 ,'visit') for t,_ in posts] sum = 0 for i in range(len(posts)-1): a,b = posts[i:i+2] sum += b[0]-a[0] events = posts + visit events.sort() posts_times = [i for i,_ in posts] visit_times = [i for i,_ in visit] w = PairwiseScoring() for t,e in events: w.event(e,t) print w.score()
/lib/evaluation/play.py
from lib.io.reporting import set_directory from lib.io.util import load_from_file from lib.options import * from lib.interfaces.model_utils import unpickle_model import os import glob import matplotlib as mpl mpl.use('Agg') from matplotlib import pyplot import numpy as np def plot(values,output, x_axis = 'Values', y_axis = 'Frequency', title = 'Histogram', range_min = None, range_max = None): if range_min != None: values = [v for v in values if v >= range_min] if range_max != None: values = [v for v in values if v <= range_max] fig = pyplot.figure() n, bins, patches = pyplot.hist( values, 60, facecolor = 'green', alpha=0.75 ) print n, bins, patches pyplot.xlabel(x_axis) pyplot.ylabel(y_axis) pyplot.title(title) pyplot.axis([min(values),max(values),0,max(n)]) pyplot.grid(True) fig.savefig('collated/%s'%output) def scatter_plot(x_vals,y_vals,c_vals,output, x_axis = 'Values', y_axis = 'Frequency', title = 'Scatterplot'): fig = pyplot.figure() ax = fig.add_subplot(1,1,1) ax.set_yscale('log') #ax.set_xscale('log') pyplot.ylim((0.1,1000)) pyplot.xlim((0,7500)) pyplot.scatter(x_vals,y_vals,c=c_vals, cmap=mpl.cm.Greens) pyplot.xlabel(x_axis) pyplot.ylabel(y_axis) pyplot.title(title) fig.savefig('collated/%s'%output) ws_ex = [ #('Average $w = %d$', '*w%d_winavg*', 'w%d_dt_average.result'), #('$w=%d,\\dtvec$', '*w%d_dt-*', 'w%d_rbf_dt'), #('$w=%d,\\dtvec,\\ctxvec$', '*w%d_dt_ctx*', 'w%d_rbf_dt_ctx'), #('$w=%d,\\vocab$', '*w%d_lang-*', 'w%d_rbf_lang_fs'), #('$\\alpha=%0.1f,\\vocab$', '*w%0.1f_lang_decay-*', 'w%0.1f_rbf_lang_fs_decay'), #('$w=%d,\\vocab$,p', '*w%d_lang_punc-*', 'w%d_rbf_lang_p_fs') #('$w=%d,\\vocab,\\dtvec$', '*w%d_lang_dt-*', 'w%d_rbf_lang_dt_fs'), #('$w=%d,\\vocab,\\dtvec$', '*w%d_lang_dt_decay-*', 'w%d_rbf_lang_dt_fs') #('cluster', '*cluster_time-*','cluster_time') ] vocab_size_ex = [ ('$\\vocab,|\\vocab|=%d', '*w15_lang_top%d-*', 'vocab-size%d'), ] patterns = [] alpha_sizes = [5,10,15,20,25,30,35,40,45,50] for i,j,k in vocab_size_ex: patterns += 
[(i%w,j%w,k%w) for w in alpha_sizes] if __name__ == '__main__': o,args = read_options() #extractor = load_from_file(o['extractor_name'], "Extractor") for n in glob.glob('models/*.py'): load_from_file(n,"Model",o) summary = open('collated/summary','w') header_tuple = [ 'MAPE', '$Pr_{miss}$', '$Pr_{fa}$', '$Pr_{error}$', '$T$-score', #'Inv. pred', #'Posts', #'Visits', 'Pairwise', 'Visit/Post' ] summary.write('%20s &\t'%'') summary.write(' &\t'.join("%10s"%i for i in header_tuple) + ' \\\\\n\\hline\n') for l_col,p,outfile in patterns: print 'pickled_models/'+p+'/model' files = glob.glob('pickled_models/'+p+'/model') log_file = open('collated/'+outfile,'w') log_file_coeffs = open('collated/'+outfile+'_coeffs','w') print len(files) count = 0 sum_tup = [0]*len(header_tuple) log_file.write('\t'.join("%10s"%i for i in header_tuple) + '\n') regression_perfs = [] t_scores = [] pv_ratios = [] tscore_pv_plot = [] posts_vals = [] for pickle_file in files: set_directory(os.path.dirname(pickle_file)) model = unpickle_model(open(pickle_file,'rb')) print model.experiments for k in model.experiments: exps = model.experiments[k] values = dict((e_name,result) for e_name,_,result in exps) if values.has_key('visit_evaluation'): try: #print values regression_perf = values['regression_test(partial thread)'] pr_miss,pr_fa,pr_error = values['visit_evaluation']['Pr_error'] t_score = values['visit_evaluation']['T-score'] posts = values['visit_evaluation']['Posts'] visits = values['visit_evaluation']['Visits'] filename = values['visit_evaluation']['filename'] pairwise = values['visit_evaluation']['Pairwise'] pv_ratio = visits/float(posts) #inv_preds = values['visit_evaluation']['Invalid Predictions'][1] tuple = [ regression_perf, pr_miss, pr_fa, pr_error, t_score, pairwise, #inv_preds, pv_ratio ] regression_perfs.append(regression_perf) t_scores.append(t_score) pv_ratios.append(pv_ratio) posts_vals.append(posts) sum_tup = [s + i for s,i in zip(sum_tup,tuple)] count += 1 
log_file.write('\t'.join("%10.3f"%i for i in tuple) +\ '\t' + filename + '\n') except KeyError as ke: print ke if values.has_key('token_score'): coeffs = values['token_score'] log_file_coeffs.write('\t'.join("%10s"%i for _,i in coeffs[:-1]) + '\n') log_file_coeffs.write('\t'.join("%10.3f"%i for i,_ in coeffs[:-1]) + '\t' +\ "%10.3f"%coeffs[-1] + '\n') """ plot( output = 'mape_dist_%s.png'%outfile, values = regression_perfs, x_axis = 'MAPE', ) plot( output = 't_score_dist_%s.png'%outfile, values = t_scores, x_axis = '$T$-score', ) plot( output = 'pv_ratio_dist_%s.png'%outfile, values = pv_ratios, x_axis = 'Post/Visit ratio' ) """ scatter_plot( x_vals = t_scores, y_vals = pv_ratios, c_vals = posts_vals, x_axis = '$T$-scores', y_axis = 'Post/Visit ratio', output = 'tscore_pv_plot%s.png'%outfile, title = '$T$-score vs. Post/Visit ratio' ) avg_tup = [float(s)/count for s in sum_tup] log_file.write('\n') log_file.write('\t'.join("%10.3f"%i for i in avg_tup) + '\n') summary.write('%20s &\t'%l_col) summary.write(' &\t'.join("%10.3f"%i for i in avg_tup) + ' \\\\\n') log_file.close() log_file_coeffs.close() summary.close()
/lib/evaluation/sliding_window.py
class SlidingWindow():
    """Incremental miss/false-alarm scoring over a K-wide sliding window.

    Events ('post' or 'visit', integer time) arrive in time order.  Every
    window position the stream has fully passed is scored by count():
    posts > visits is a miss (phi), visits > posts is a false alarm (psi),
    and windows containing any post are reference windows.  pr_error()
    blends the two rates with weight `alpha`.
    """

    def __init__(self, K=60, alpha=0.5):
        self.window = []      # events currently inside [low, low + K)
        self.low = 0          # left edge of the current window
        self.window_size = K
        self.alpha = alpha
        self.phi_count = 0    # windows where posts outnumber visits
        self.psi_count = 0    # windows where visits outnumber posts
        self.ref_count = 0    # windows containing at least one post
        self.all_count = 0    # windows evaluated in total

    def event(self, event_type, time):
        time = int(time)
        if time >= self.low + self.window_size:
            # The new event lies beyond the current window: score every
            # position the stream has now fully passed.
            for t in range(self.low, time - self.window_size + 1):
                self.low = t
                if self.window:
                    while self.window[0][0] < self.low:
                        self.window.pop(0)
                        if not self.window:
                            break
                    self.count()
            # Advance past the last scored position, admit the new event,
            # then evict anything that fell off the left edge.
            self.low = t + 1
            self.window.append((time, event_type))
            while self.window[0][0] < self.low:
                self.window.pop(0)
                if not self.window:
                    break
        else:
            self.window.append((time, event_type))

    def count(self):
        """Score the current window position."""
        posts = sum(1 for _, et in self.window if et == 'post')
        visits = sum(1 for _, et in self.window if et == 'visit')
        if posts > 0:
            self.ref_count += 1
        if posts > visits:
            self.phi_count += 1
        elif posts < visits:
            self.psi_count += 1
        self.all_count += 1

    def pr_error(self):
        """Return (pr_miss, pr_fa, pr_error)."""
        pr_miss = float(self.phi_count) / self.ref_count
        pr_fa = float(self.psi_count) / self.all_count
        pr_error = self.alpha * pr_miss + (1 - self.alpha) * pr_fa
        return pr_miss, pr_fa, pr_error


if __name__ == "__main__":
    k = 10
    posts = [(t * 2, 'post') for t in range(10)] + \
            [(t * 2, 'post') for t in range(30, 40)]
    visit = [(t * 8 + 1, 'visit') for t in range(10)]
    total_gap = 0
    for i in range(len(posts) - 1):
        a, b = posts[i:i + 2]
        total_gap += b[0] - a[0]
    w = SlidingWindow(K=int(float(total_gap) * 0.5 / (len(posts) - 1)))
    events = posts + visit
    events.sort()
    print(events[-1])
    posts_times = [i for i, _ in posts]
    visit_times = [i for i, _ in visit]
    for t, e in events:
        w.event(e, t)
    print(w.pr_error())
/lib/graphs.py
import os


def plot_hist(bin_size, bin_list, directory=None, upper=None):
    """Render one bar chart per histogram in `bin_list`.

    Each element of `bin_list` maps bin index -> count (indexable from 0 up
    to `upper` or its largest key).  With `directory` set, the charts are
    saved as <directory>/001, 002, ...; otherwise each chart is shown
    interactively.  `bin_size` is accepted for interface compatibility but
    unused here.
    """
    # BUG FIX: the original unconditionally called os.path.exists(directory)
    # and os.makedirs(directory), which raises when `directory` is None even
    # though None is the documented "show instead of save" default.
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    import matplotlib.pyplot as plt
    count = 1
    for bins in bin_list:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        up_bound = upper or max(bins)
        x = [i for i in range(up_bound + 1)]
        y = [bins[i] for i in range(up_bound + 1)]
        ax.bar(x, y, width=1)
        if not directory:
            plt.show()
        else:
            plt.savefig('%s/%03d' % (directory, count))
        count += 1
/lib/interfaces/extractor_utils.py
''' Created on Jul 19, 2012 @author: shawn ''' from lib.io.reporting import get_directory from lib.options import read_options from lib.io.reader import windowed from lib.io.util import load_from_file import pickle def save_model(filename,model): f = open("%s/%s"%(get_directory(),filename),'wb') pickle.dump(model,f) f.close() def unpickle_model(filepath): return pickle.load(filepath) if __name__ == '__main__': o,args = read_options() extractor = load_from_file(o['extractor_name'], "Extractor") for window,d_t in windowed([o['test_file']],o['window_size']): print extractor.extract(window),d_t extractor.save()
/lib/interfaces/generic_model.py
'''
Created on Jul 17, 2012

@author: shawn
'''
import hashlib

from model_utils import save_model
from collections import defaultdict


class GenericModel(object):
    """Base class for visit-prediction models.

    Provides the shared prediction bookkeeping plus experiment-result
    storage keyed by an MD5 digest of the test-file name(s).
    """

    epsilon = 0.1

    def __init__(self, o):
        self.options = o
        # experiment key -> list of (test_type, test_files, result)
        self.experiments = defaultdict(list)

    def predict(self, feature_vec=None, d_t=None, current_d_t=None):
        # NOTE(review): relies on self.avg being set by a subclass --
        # GenericModel itself never assigns it.
        pred = self.avg
        if current_d_t:
            # Step the average forward until the prediction lands strictly
            # after the time already elapsed (plus epsilon).
            k = 0
            while k * self.avg + pred <= current_d_t + self.epsilon:
                k += 1
            return k * self.avg + pred
        else:
            return pred

    def ensure_prediction_conditions(self, pred, feature_vec, d_t, current_d_t):
        """Return `pred` if it lies far enough in the future, otherwise fall
        back to the base prediction rule."""
        if current_d_t:
            if pred > current_d_t + self.epsilon:
                return pred
            else:
                return GenericModel.predict(self, feature_vec, d_t, current_d_t)
        else:
            return pred

    def add_experiment(self, test_type, test_files, result):
        """Record `result` under a digest of the (sorted) test-file names."""
        if hasattr(test_files, 'sort'):
            test_files.sort()
            names = '\n'.join(test_files)
        else:
            names = test_files
        # BUG FIX: the py2-only `md5` module was used here; hashlib is the
        # supported equivalent (and also exists on py3).
        key = hashlib.md5(names.encode('utf-8')).hexdigest()
        self.experiments[key].append((test_type, test_files, result))

    def save(self):
        return save_model('model', self)
/lib/interfaces/model_utils.py
'''
Created on Jul 19, 2012

@author: shawn
'''
from lib.io.reporting import get_directory
import pickle


def save_model(filename, model):
    """Pickle `model` under `filename` in the current reporting directory.

    Returns the full path the model was written to.
    """
    fullpath = "%s/%s" % (get_directory(), filename)
    # BUG FIX: context manager closes the handle even if pickling fails.
    with open(fullpath, 'wb') as f:
        pickle.dump(model, f)
    return fullpath


def unpickle_model(filepath):
    """Unpickle a model from an already-open binary file object.

    NOTE(review): despite the name, `filepath` is a file object, not a path.
    """
    return pickle.load(filepath)
/lib/io/__init__.py
import pickled_globals import writer
/lib/io/dataset.py
import numpy as np from sklearn import linear_model from itertools import permutations from lang_model import Extractor from utils.reader import * import csv,sys count = 0 clf = linear_model.LinearRegression() filenames = [sys.argv[1]] filename_x = "X" filename_y = "Y" window_size = 15 e = Extractor() count = sum(1 for _ in windowed(filenames,window_size)) class RewinderWindow(): def __init__(self,filenames,window_size): self.filenames = filenames self.window_size = window_size def reset(self): return windowed(self.filenames,self.window_size) e.train(RewinderWindow(filenames,window_size)) e.finalise() def first(vec_size,vec_count): X = np.memmap( filename_x, mode = 'w+', shape = (vec_count,vec_size), dtype="float64" ) Y = np.memmap( filename_y, mode = "w+", shape = (vec_count,), dtype = "float64" ) return X,Y X,Y = None,None for i,instance in enumerate(windowed(filenames,window_size)): window, d_t = instance x_vec = e.extract(window) if i == 0: X,Y = first(len(x_vec),count) X[i][:] = x_vec[:] Y[i] = d_t print X, X.shape print Y, Y.shape
/lib/io/pickled_globals.py
try:
    import cPickle as pickle  # py2: fast C implementation
except ImportError:
    import pickle  # BUG FIX: cPickle does not exist on py3


class pickled_globals(object):
    """Lazily load pickled objects from a directory, one file per attribute.

    Accessing `pg.foo` unpickles `<pg_dir>/foo` on first use and caches the
    result on the instance, so later accesses bypass __getattr__ entirely.
    """

    def __init__(self, pg_dir):
        self.pg_dir = pg_dir

    def __getattr__(self, attr_name):
        """Load `<pg_dir>/<attr_name>`, cache it on self, and return it."""
        # BUG FIX: close the file handle after loading (the original left
        # it to the garbage collector).
        with open('%s/%s' % (self.pg_dir, attr_name), 'rb') as f:
            obj = pickle.load(f)
        setattr(self, attr_name, obj)
        return obj


pg = pickled_globals('global_objs')
/lib/io/reader.py
#!/usr/bin/python2
import nltk, re
from nltk.stem.porter import PorterStemmer
import time

bin_size = 10
users = set()  # user names seen across threads; filled by class_text


def text_tdelta(input_file):
    """Yield (minutes-since-previous-post, text, user, localtime) per line.

    Input lines are tab-separated: timestamp, text, user.  The first line
    only primes the delta and is not yielded.
    """
    prev_tup = None
    for line in open(input_file):
        tup = line.split('\t')
        if prev_tup:
            yield (
                (float(tup[0]) - float(prev_tup[0])) / 60,
                tup[1].strip(),
                tup[2].strip(),
                time.localtime(float(tup[0]))
            )
        prev_tup = tup


def class_text(threadfiles):
    """Yield (delta, text, localtime) across all threads, after recording
    every posting user into the module-level `users` set."""
    for threadfile in threadfiles:
        for line in open(threadfile):
            tup = line.split('\t')
            users.add(tup[1])
    for threadfile in threadfiles:
        # BUG FIX: text_tdelta yields 4-tuples; the original unpacked only
        # three values and raised ValueError on the first item.
        for td, text, user, t in text_tdelta(threadfile):
            yield (td, text, t)


def windowed(threadfiles, N, offset=-1):
    """Yield (window, next_delta) pairs over each thread.

    `window` is a tuple of per-field lists covering the last N posts (one
    list per field of the text_tdelta tuples); `next_delta` is the time gap
    to the post following the window.  The first `offset` + 1 windows are
    skipped.
    """
    count = 0
    for threadfile in threadfiles:
        window = [None]  # sentinel; evicted once the window first fills
        prev_window = None
        for tup in text_tdelta(threadfile):
            window.append(tup)
            if prev_window:
                if count <= offset:
                    count += 1
                else:
                    yield prev_window, tup[0]
            if len(window) > N:
                window.pop(0)
                # Transpose the full window into per-field lists.
                result = [None] * len(tup)
                for i in range(len(tup)):
                    result[i] = [t[i] for t in window]
                prev_window = tuple(result)


def filter_tokenise(text):
    """Lower-case, split, and normalise `text`; return surviving tokens."""
    text = text.lower()
    r = []
    for w in re.split(r'[^0-9a-z\.\$]+', text):
        w = preprocess(w)
        if w:
            r.append(w)
    return r


non_alphanum = re.compile(r'\W')
number = re.compile(r'[0-9]')
splitter = re.compile(r'[\s\.\-\/]+')
model = re.compile(r'([.\#]+\w+|\w+[.\#]+)')
stemmer = PorterStemmer()
stop_words = set(nltk.corpus.stopwords.words('english'))


def preprocess(word):
    """Normalise one token; return None for tokens that should be dropped.

    Digits collapse to '#', model-number-like tokens and stopwords are
    dropped, known user names map to '#USER#', survivors are stemmed, and
    anything shorter than 3 characters is dropped.
    """
    global users
    w = word.lower()
    if w in stop_words:
        return
    w = number.sub("#", w)
    if model.match(w):
        return
    if w in users:
        return "#USER#"
    w = stemmer.stem_word(w)
    if len(w) < 3:
        return
    return w
/lib/io/reporting.py
REPORTS = None  # root reports directory, set by reporting_init
SUBDIR = None   # directory all log/model files are written into

import sys, os
from datetime import datetime


def set_directory(directory):
    """Point subsequent log/model writes at an existing directory."""
    global SUBDIR
    SUBDIR = directory


def get_directory():
    """Return the directory currently used for report output."""
    global SUBDIR
    return SUBDIR


def reporting_init(options, directory):
    """Create a timestamped (optionally experiment-named) subdirectory of
    `directory` and record the exact command line into it."""
    global SUBDIR, REPORTS
    REPORTS = directory
    suffix = ' - %s' % options.experiment_name if options.experiment_name else ''
    SUBDIR = '%s/%s' % (directory, datetime.now().strftime('%Y%m%d%H%M') + suffix)
    ensure_dir(SUBDIR)
    with open("%s/%s" % (SUBDIR, 'command'), 'w') as f:
        f.write(sys.executable)
        f.write(' ')
        f.write(sys.argv[0])
        for arg in sys.argv[1:]:
            if arg[0] == '-':
                # Option flags start a fresh continuation line, unquoted.
                f.write(' \\\n\t')
                f.write(arg)
            else:
                # Values are quoted so the command can be replayed verbatim.
                f.write(' ')
                f.write('"%s"' % arg)
        f.write('\n')


def ensure_dir(f):
    # NOTE: existence is checked via './%s' while creation uses the raw path.
    if not os.path.exists('./%s' % f):
        os.makedirs(f)


def timestamp_log(*filenames):
    """Open the named text files for writing inside SUBDIR.

    Returns a single file object for one name, else a list of them.
    """
    handles = [open("%s/%s" % (SUBDIR, name), 'w') for name in filenames]
    return handles[0] if len(handles) == 1 else handles


def timestamp_model(*filenames):
    """Binary-mode counterpart of timestamp_log."""
    handles = [open("%s/%s" % (SUBDIR, name), 'wb') for name in filenames]
    return handles[0] if len(handles) == 1 else handles


def write_value(key, value):
    """Write a single value (plus newline) into SUBDIR/<key>."""
    with open("%s/%s" % (SUBDIR, key), 'w') as f:
        f.write('%s\n' % value)
/lib/io/util.py
from reader import windowed import sys, imp, traceback, md5, pickle def load_from_file(filepath,class_name,*params): class_inst = None """ mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1]) if file_ext.lower() == '.py': py_mod = imp.load_source(mod_name, filepath) elif file_ext.lower() == '.pyc': py_mod = imp.load_compiled(mod_name, filepath) """ try: try: #code_dir = os.path.dirname(filepath) #code_file = os.path.basename(filepath) fin = open(filepath, 'rb') module_name = md5.new(filepath).hexdigest() py_mod = imp.load_source(module_name, filepath, fin) print "%s loaded as %s"%(filepath,module_name) finally: try: fin.close() except: pass except ImportError: traceback.print_exc(file = sys.stderr) raise except: traceback.print_exc(file = sys.stderr) raise if hasattr(py_mod, class_name): class_ = getattr(py_mod,class_name) class_inst = class_(*params) return class_inst def extracted_vecs(extractor, filename, window_size, first = None): for window,d_t in windowed([filename],window_size): feature_vec = extractor.extract(window) yield feature_vec,d_t
/lib/io/writer.py
''' Created on Sep 24, 2012 @author: shawn ''' from lib.options.config import configuration as config import __builtin__ class FileWrapper(object): def __init__(self,obj): self._obj = obj def close(self,*args,**kwargs): print "Closing file..." self._obj.close(*args,**kwargs) def __getattr__(self, attr): # see if this object has attr # NOTE do not use hasattr, it goes into # infinite recurrsion if attr in self.__dict__: # this object has it return getattr(self, attr) # proxy to the wrapped object return getattr(self._obj, attr) def marked_open(*params): global _open #print params if len(params) > 1 and (params[1] == 'w' or params[1] == 'wb' or params[1] == 'w+'): print "Opening file..." return FileWrapper(_open(*params)) else: return _open(*params) _open = __builtin__.open __builtin__.open = marked_open """ def __defattr__(self,attr): if hasattr(self.obj, attr): attr_value = getattr(self.obj,attr) if isinstance(attr_value,types.MethodType): def callable(*args, **kwargs): return attr_value(*args, **kwargs) return callable else: return attr_value else: raise AttributeError """
/lib/options/__init__.py
import options,config
/lib/options/config.py
import ConfigParser
from collections import namedtuple

# Sections expected in the local ./config file.
sections = ['dirs', 'filename_formats']


def subconf(section):
    """Build a named tuple exposing one config section's keys as attributes."""
    Conf = namedtuple(section, (k for k, _ in c.items(section)))
    conf = Conf(**dict(c.items(section)))
    return conf


c = ConfigParser.RawConfigParser()  # allow_no_value=True
# BUG FIX: close the config file after parsing (the original leaked the
# handle returned by open()).
with open('config', 'r') as _config_file:
    c.readfp(_config_file)

PConf = namedtuple('Configuration', sections)
d = dict((sect, subconf(sect)) for sect in sections)
configuration = PConf(**d)
/lib/options/options.py
from optparse import OptionParser from random import random opts,args = None,None p_opts = None def read_options(): global opts,args p = OptionParser() p.add_option("-M","--model",metavar = "MODEL_PATH.py", action = "store", dest = "model_name", help = "Model to be used for current experiment") p.add_option("-E","--extractor",metavar = "EXTRACTOR_PATH.py", action = "store", dest = "extractor_name", help = "Extractor to be used for current experiment") p.add_option("-t","--test-file", metavar = "FILE", action = "store", dest = "test_file", help = "file model will be evaluated on") p.add_option("-n","--name",metavar = "NAME", action = "store", dest = "experiment_name", help = "Name given to experiment") p.add_option("-S","--pickled-extractor",metavar = "PICKLED_EXTRACTOR", action = "store", dest = "pickled_extractor", help = "Pickled extractor to be used for current experiment\n\ --extractor must be specified") p.add_option("-P","--pickled-model",metavar = "PICKLED_MODEL", action = "store", dest = "pickled_model", help = "Pickled model to be used for current experiment\n\ --model must be specified") p.add_option("-N","--window-size",metavar = "N", type = "int", default = 1, action = "store", dest = "window_size", help = "Window size to segment thread stream into") p.add_option("-B","--bandwidth",metavar = "BW", action = "store", dest = "bandwidth",type = "int",default = 1000, help = "Bandwidth limit. 
Default is 1000") p.add_option("-v","--verbose", action = "store_true", dest = "verbose", help = "print extra debug information") (opts,args) = p.parse_args() print opts,args if not opts.extractor_name: opts.extractor_name = opts.model_name if opts.experiment_name and opts.experiment_name.endswith('RANDOM'): opts.experiment_name = opts.experiment_name.replace( 'RANDOM', str(random.randint(100,999))) return opts,args import sys def read_model_extractor_options(args,extractor=None,model=None): global p_opts p = OptionParser() try: extractor.opt_cfg(p) except: print "Extractor has no options" try: model.opt_cfg(p) except: print "Model has no options" p_opts,args = p.parse_args(args) print p_opts return args if __name__=="__main__": read_options()
/lib/training/test.py
import lda from utils.reader import windowed,filter_tokenise import sys import matplotlib.pyplot as plt from collections import defaultdict def plot_hist(bin_size,bin_list, upper =None): for bins in bin_list: fig = plt.figure() ax = fig.add_subplot(1,1,1) up_bound = upper or max(bins) x = [i for i in range(up_bound+1)] y = [bins[i] for i in range(up_bound+1)] # print x # print y ax.bar(x,y,width=1) plt.show() docs = [' '.join(w[2]) for w,_ in windowed(sys.argv[2:],int(sys.argv[1]))] tokenised_docs = [filter_tokenise(i) for i in docs] num_topics = 3 lda = lda.LDASampler( docs=tokenised_docs, num_topics=num_topics, alpha=0.25, beta=0.25) print 'Sampling...' for _ in range(100): zs = lda.assignments #print zs #print '[%i %i] [%i %i]' % (zs[0][3], zs[1][3], zs[2][3], zs[3][3]) lda.next() print print 'words ordered by probability for each topic:' tks = lda.topic_keys() for i, tk in enumerate(tks): print '%3d'%i , tk[:10] # print '%3s'%'', tk[10:20] # print '%3s'%'', tk[20:30] print print 'document keys:' dks = lda.doc_keys() size = 20 time_differences = [dt for _,dt in windowed(sys.argv[2:],int(sys.argv[1]))] bin_list = [] for i in range(num_topics): bins = defaultdict(float) bin_list.append(bins) for dt, doc, dk in zip(time_differences, docs, dks): print '%5d'%dt + '\t'+\ doc[:40] +"..." + '\t' +\ str(dk) for p,i in dk: bin = int(float(dt)/size) bin_list[i][bin] += p plot_hist(size,bin_list) #print 'topic assigned to each word of first document in the final iteration:' #lda.doc_detail(0)
/lib/training/trainer.py
from utils.reader import windowed from utils.reporting import * from utils.util import * from regression_performance import performance import pickle,math,getopt def train(model,extractor,filenames,window_size,iterations = 1): for _ in range(iterations): for f in filenames: try: model.train(extracted_vecs(extractor,f,window_size)) except ValueError as e: raise e model.finalise() return model.save() """ f = timestamp_model('model') pickle.dump(model,f) f.close() """ def train_extractor(extractor,filenames,window_size): extractor.train(windowed(filenames,window_size)) extractor.finalise() return extractor.save() if __name__ == "__main__": o,args = read_options() reporting_init(o,"pickled_models") extractor = load_from_file(o.extractor_name, "Extractor") model = load_from_file(o.model_name,"Model",o) if hasattr(extractor,'train'): train_extractor( extractor,args,o.window_size) filename = train(model,extractor,args,o.window_size) print performance(model,extractor,[o.test_file],o.window_size,o.verbose,filename)
/lib/training/trainer_test.py
from utils.reader import windowed from utils.reporting import * from utils.util import * import pickle,math,getopt from evaluate_window import evaluate as evaluate_window from utils.options import read_options, read_model_extractor_options def train(model,extractor,iterator,window_size,iterations = 1): for _ in range(iterations): model.train(iterator) model.finalise() return model.save() """ f = timestamp_model('model') pickle.dump(model,f) f.close() """ def performance(model,extractor,rest_instances,window_size,verbose,model_file): print "Calculating MAPE" print "=====================" total_percent_error = 0 count = 0 for fv,d_t in rest_instances: p = model.predict(fv) if d_t > 0: percent_error = math.fabs(float(p - d_t)/d_t) if verbose: print "delta_t: %d\tpredicted: %d\tAPE: %0.2f"%( d_t, p, percent_error ) total_percent_error += percent_error count += 1 ave_percentage_error = total_percent_error/count return ave_percentage_error def train_extractor(extractor,filenames,window_size): extractor.train(windowed(filenames,window_size)) extractor.finalise() return extractor.save() def file_len(fname): with open(fname) as f: for i, l in enumerate(f): pass return i + 1 from evaluate import evaluate if __name__ == "__main__" 'Visit/Post': o,args = read_options() reporting_init(o,"pickled_models") extractor = load_from_file(o.extractor_name, "Extractor") model = load_from_file(o.model_name,"Model",o) args = read_model_extractor_options(args,extractor,model) print "Training extractor..." 
if hasattr(extractor,'train'): train_extractor( extractor,args,o.window_size) instances = [i for i in extracted_vecs(extractor,args[0],o.window_size)] instance_count = len(instances) if instance_count < 2: print "Insufficient instances" sys.exit() reporting_init(o,"pickled_models") train_count = int(instance_count*0.75) trainset,testset = instances[:train_count],instances[train_count:] #trainset,testset = instances,instances #print trainset print "Instance split:",len(trainset),len(testset) print "Training model..." filename = train( model, extractor, trainset, o.window_size) print "Evaluating..." ave_percentage_error = performance(model,extractor,testset,o.window_size,o.verbose,filename) print ave_percentage_error model.add_experiment('regression_test(partial thread)',filename,ave_percentage_error) result = evaluate(args[0], model, extractor, o.window_size, o.bandwidth, offset = train_count, sliding_window_size=sum(i for _,i in trainset)/len(trainset), verbose = o.verbose) result['filename'] = args[0] result['offset'] = train_count print model.experiments model.add_experiment('visit_evaluation',filename,result) model.save()
/preamble.py
import cPickle as pickle import lib.io.pickled_globals import lib.graphs as graphs
/predictor.py
import cPickle as pickle import sys import lib.io.pickled_globals import lib.graphs as graphs from hist_to_probdist import time_dist from lib.io.reader import windowed,filter_tokenise window_size = 15 time_bin = 20 def load_model(topics): timdist = pickle.load(open('graphs/prob_dist/dist_t%03d'%topics,'rb')) lda = pickle.load( open('global_objs/w%d_t%d_learnt_topics'%(window_size,topics),'rb') ) prior = pickle.load(open('graphs/prob_dist/dist_t%03d_prior'%topics,'rb')) return lda,timdist,prior def main(): print "loading documents..." documents = ['data/'+i.strip() for i in open(sys.argv[1],'r')] print documents lda, time_model,prior = load_model(9) docs = ((' '.join(w[2]),dt) for w,dt in windowed(documents,window_size)) for doc,dt in docs: topic_dist = lda.doc_distribution(filter_tokenise(doc)) dt_dist = time_dist(topic_dist,time_model,prior,limit=24*3*7) print sum((i*(time_bin/2)) * p for i,p in enumerate(dt_dist)), dt if __name__ == "__main__": main()
/print_topics
#!/bin/python2 import cPickle as pickle import lib.io.pickled_globals import lib.graphs as graphs for i in range(1,10): print "Loading pickled topic model with %d topics ..."%(i+1) lda = pickle.load(open('global_objs/w15_t%d_learnt_topics'%(i+1),'rb')) print "Retrieving topics..." top_tok = lda.topic_keys(num_displayed=100) print "Writing to files..." for j, tk in enumerate(top_tok): out = open('graphs/hist_%d_topics/%03d'%(i+1,j+1),'w') out.write('\t'.join(tk) + '\n') out.close()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
supasate/FBPCS
refs/heads/main
{"/fbpcs/decorator/error_handler.py": ["/fbpcs/error/mapper/aws.py"], "/tests/error/mapper/test_aws.py": ["/fbpcs/error/mapper/aws.py"], "/fbpcs/service/log_cloudwatch.py": ["/fbpcs/gateway/cloudwatch.py", "/fbpcs/service/log.py"], "/tests/gateway/test_cloudwatch.py": ["/fbpcs/gateway/cloudwatch.py"], "/fbpcs/entity/mpc_instance.py": ["/fbpcs/entity/instance_base.py", "/fbpcs/entity/container_instance.py"], "/fbpcs/repository/instance_s3.py": ["/fbpcs/entity/instance_base.py", "/fbpcs/service/storage_s3.py"], "/tests/util/test_yaml.py": ["/fbpcs/util/yaml.py"], "/fbpcs/service/mpc.py": ["/fbpcs/service/onedocker.py", "/fbpcs/util/typing.py", "/fbpcs/service/storage.py", "/fbpcs/entity/container_instance.py", "/fbpcs/service/container.py", "/fbpcs/entity/mpc_instance.py"], "/tests/service/test_onedocker.py": ["/fbpcs/service/onedocker.py", "/fbpcs/entity/container_instance.py"], "/fbpcs/service/container_aws.py": ["/fbpcs/util/typing.py", "/fbpcs/entity/container_instance.py", "/fbpcs/service/container.py", "/fbpcs/gateway/ecs.py"], "/tests/util/test_typing.py": ["/fbpcs/util/typing.py"], "/fbpcs/service/storage_s3.py": ["/fbpcs/util/s3path.py", "/fbpcs/service/storage.py", "/fbpcs/gateway/s3.py"], "/onedocker/onedocker_runner.py": ["/fbpcs/util/s3path.py", "/fbpcs/service/storage_s3.py", "/onedocker/util.py"], "/tests/util/test_reflect.py": ["/fbpcs/util/s3path.py", "/fbpcs/util/reflect.py"], "/tests/util/test_s3path.py": ["/fbpcs/util/s3path.py"], "/tests/service/test_storage.py": ["/fbpcs/service/storage.py"], "/fbpcs/gateway/cloudwatch.py": ["/fbpcs/decorator/error_handler.py"], "/fbpcs/gateway/ec2.py": ["/fbpcs/decorator/error_handler.py", "/fbpcs/mapper/aws.py", "/fbpcs/entity/vpc_instance.py"], "/fbpcs/gateway/ecs.py": ["/fbpcs/decorator/error_handler.py", "/fbpcs/mapper/aws.py", "/fbpcs/entity/container_instance.py"], "/fbpcs/gateway/s3.py": ["/fbpcs/decorator/error_handler.py"], "/tests/decorator/test_error_handler.py": 
["/fbpcs/decorator/error_handler.py"], "/tests/gateway/test_ec2.py": ["/fbpcs/gateway/ec2.py", "/fbpcs/entity/vpc_instance.py"], "/tests/mapper/test_aws.py": ["/fbpcs/mapper/aws.py", "/fbpcs/entity/container_instance.py"], "/tests/repository/test_instance_s3.py": ["/fbpcs/service/storage_s3.py", "/fbpcs/repository/instance_s3.py", "/fbpcs/entity/mpc_instance.py"], "/tests/service/test_storage_s3.py": ["/fbpcs/service/storage_s3.py"], "/tests/service/test_mpc.py": ["/fbpcs/service/mpc.py", "/fbpcs/entity/container_instance.py", "/fbpcs/entity/mpc_instance.py"], "/fbpcs/mapper/aws.py": ["/fbpcs/entity/container_instance.py", "/fbpcs/entity/vpc_instance.py"], "/fbpcs/service/container.py": ["/fbpcs/entity/container_instance.py"], "/fbpcs/service/onedocker.py": ["/fbpcs/entity/container_instance.py", "/fbpcs/service/container.py"], "/tests/gateway/test_ecs.py": ["/fbpcs/entity/container_instance.py", "/fbpcs/gateway/ecs.py"], "/tests/service/test_container_aws.py": ["/fbpcs/entity/container_instance.py", "/fbpcs/service/container_aws.py"], "/tests/service/test_log_cloudwatch.py": ["/fbpcs/service/log_cloudwatch.py"], "/fbpcs/repository/mpc_instance_local.py": ["/fbpcs/entity/mpc_instance.py"], "/onedocker/tests/test_util.py": ["/onedocker/util.py"], "/tests/gateway/test_s3.py": ["/fbpcs/gateway/s3.py"]}
└── ├── fbpcs │ ├── decorator │ │ └── error_handler.py │ ├── entity │ │ ├── container_instance.py │ │ ├── instance_base.py │ │ ├── mpc_instance.py │ │ └── vpc_instance.py │ ├── error │ │ └── mapper │ │ └── aws.py │ ├── gateway │ │ ├── cloudwatch.py │ │ ├── ec2.py │ │ ├── ecs.py │ │ └── s3.py │ ├── mapper │ │ └── aws.py │ ├── repository │ │ ├── instance_s3.py │ │ ├── mpc_game_repository.py │ │ └── mpc_instance_local.py │ ├── service │ │ ├── container.py │ │ ├── container_aws.py │ │ ├── log.py │ │ ├── log_cloudwatch.py │ │ ├── mpc.py │ │ ├── onedocker.py │ │ ├── storage.py │ │ └── storage_s3.py │ └── util │ ├── reflect.py │ ├── s3path.py │ ├── typing.py │ └── yaml.py ├── onedocker │ ├── env.py │ ├── onedocker_runner.py │ ├── tests │ │ └── test_util.py │ └── util.py ├── setup.py └── tests ├── decorator │ └── test_error_handler.py ├── error │ └── mapper │ └── test_aws.py ├── gateway │ ├── test_cloudwatch.py │ ├── test_ec2.py │ ├── test_ecs.py │ └── test_s3.py ├── mapper │ └── test_aws.py ├── repository │ └── test_instance_s3.py ├── service │ ├── test_container_aws.py │ ├── test_log_cloudwatch.py │ ├── test_mpc.py │ ├── test_onedocker.py │ ├── test_storage.py │ └── test_storage_s3.py └── util ├── test_reflect.py ├── test_s3path.py ├── test_typing.py └── test_yaml.py
/fbpcs/decorator/error_handler.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import functools
from typing import Callable

from botocore.exceptions import ClientError

from fbpcs.error.mapper.aws import map_aws_error
from fbpcs.error.pcs import PcsError


def error_handler(f: Callable) -> Callable:
    """Decorator translating AWS client errors into PCS exceptions.

    botocore ClientErrors are mapped through map_aws_error; any other
    exception is wrapped in a generic PcsError.
    """

    # BUG FIX: functools.wraps preserves the wrapped function's name,
    # docstring, and signature metadata for introspection/debugging.
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except ClientError as err:
            # Chain the original error so the AWS failure is not lost.
            raise map_aws_error(err) from err
        except Exception as err:
            raise PcsError(err) from err

    return wrap
/fbpcs/entity/container_instance.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from dataclasses import dataclass
from enum import Enum
from typing import Optional

from dataclasses_json import dataclass_json


class ContainerInstanceStatus(Enum):
    """Lifecycle states reported for a container instance."""

    UNKNOWN = "UNKNOWN"
    STARTED = "STARTED"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"


@dataclass_json
@dataclass
class ContainerInstance:
    """JSON-serialisable description of a single container."""

    instance_id: str
    ip_address: Optional[str] = None
    status: ContainerInstanceStatus = ContainerInstanceStatus.UNKNOWN
/fbpcs/entity/instance_base.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict import abc class InstanceBase(abc.ABC): @abc.abstractmethod def get_instance_id(self) -> str: pass @abc.abstractmethod def __str__(self) -> str: pass
/fbpcs/entity/mpc_instance.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Mapping, Optional from dataclasses_json import dataclass_json from fbpcs.entity.container_instance import ContainerInstance from fbpcs.entity.instance_base import InstanceBase class MPCRole(Enum): SERVER = "SERVER" CLIENT = "CLIENT" class MPCInstanceStatus(Enum): UNKNOWN = "UNKNOWN" CREATED = "CREATED" STARTED = "STARTED" COMPLETED = "COMPLETED" FAILED = "FAILED" @dataclass_json @dataclass class MPCInstance(InstanceBase): instance_id: str game_name: str mpc_role: MPCRole num_workers: int server_ips: Optional[List[str]] containers: List[ContainerInstance] status: MPCInstanceStatus game_args: Optional[List[Dict[str, Any]]] arguments: Mapping[str, Any] def __init__( self, instance_id: str, game_name: str, mpc_role: MPCRole, num_workers: int, ip_config_file: Optional[str] = None, server_ips: Optional[List[str]] = None, containers: Optional[List[ContainerInstance]] = None, status: MPCInstanceStatus = MPCInstanceStatus.UNKNOWN, game_args: Optional[List[Dict[str, Any]]] = None, **arguments # pyre-ignore ) -> None: self.instance_id = instance_id self.game_name = game_name self.mpc_role = mpc_role self.num_workers = num_workers self.ip_config_file = ip_config_file self.server_ips = server_ips self.containers = containers or [] self.status = status self.game_args = game_args self.arguments = arguments def get_instance_id(self) -> str: return self.instance_id def __str__(self) -> str: # pyre-ignore return self.to_json()
/fbpcs/entity/vpc_instance.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from dataclasses import dataclass, field
from enum import Enum
from typing import Dict

from dataclasses_json import dataclass_json


class VpcState(Enum):
    """States of an EC2 VPC as surfaced by describe_vpcs (see mapper/aws.py)."""

    UNKNOWN = "UNKNOWN"
    PENDING = "PENDING"
    AVAILABLE = "AVAILABLE"


@dataclass_json
@dataclass
class Vpc:
    """An EC2 VPC: id, lifecycle state, and its tags as a plain dict."""

    vpc_id: str
    state: VpcState = VpcState.UNKNOWN
    # `dict` as the factory is the idiomatic equivalent of `lambda: {}`;
    # each instance still gets its own fresh dictionary.
    tags: Dict[str, str] = field(default_factory=dict)
/fbpcs/error/mapper/aws.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from botocore.exceptions import ClientError

from fbpcs.error.pcs import PcsError
from fbpcs.error.throttling import ThrottlingError


# reference: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html
def map_aws_error(error: ClientError) -> PcsError:
    """Translate a botocore ClientError into the matching PCS error type.

    Throttling responses become ThrottlingError; everything else becomes a
    generic PcsError carrying the AWS error message.
    """
    details = error.response["Error"]
    message = details["Message"]
    if details["Code"] == "ThrottlingException":
        return ThrottlingError(message)
    return PcsError(message)
/fbpcs/gateway/cloudwatch.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import Any, Dict, Optional

import boto3

from fbpcs.decorator.error_handler import error_handler


class CloudWatchGateway:
    """Thin wrapper around the boto3 CloudWatch Logs client.

    Public methods are wrapped with @error_handler, which converts botocore
    ClientErrors into PCS errors.
    """

    def __init__(
        self,
        region: str = "us-west-1",
        access_key_id: Optional[str] = None,
        access_key_data: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.region = region
        # Copy the caller's config: previously the credentials below were
        # inserted into the dict the caller passed in, mutating it.
        config = dict(config) if config else {}
        if access_key_id:
            config["aws_access_key_id"] = access_key_id
        if access_key_data:
            config["aws_secret_access_key"] = access_key_data
        # pyre-ignore
        self.client = boto3.client("logs", region_name=self.region, **config)

    @error_handler
    def get_log_events(self, log_group: str, log_stream: str) -> Dict[str, Any]:
        """Return the raw GetLogEvents response for one log stream."""
        return self.client.get_log_events(
            logGroupName=log_group, logStreamName=log_stream
        )
/fbpcs/gateway/ec2.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import Any, Dict, List, Optional

import boto3

from fbpcs.decorator.error_handler import error_handler
from fbpcs.entity.vpc_instance import Vpc
from fbpcs.mapper.aws import map_ec2vpc_to_vpcinstance


class EC2Gateway:
    """Thin wrapper around the boto3 EC2 client (VPC queries only).

    Public methods are wrapped with @error_handler, which converts botocore
    ClientErrors into PCS errors.
    """

    def __init__(
        self,
        region: str,
        access_key_id: Optional[str],
        access_key_data: Optional[str],
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.region = region
        # Copy the caller's config: previously the credentials below were
        # inserted into the dict the caller passed in, mutating it.
        config = dict(config) if config else {}
        if access_key_id is not None:
            config["aws_access_key_id"] = access_key_id
        if access_key_data is not None:
            config["aws_secret_access_key"] = access_key_data
        # pyre-ignore
        self.client = boto3.client("ec2", region_name=self.region, **config)

    @error_handler
    def describe_vpcs(self, vpc_ids: List[str]) -> List[Vpc]:
        """Describe the given VPCs, mapped into Vpc entities."""
        response = self.client.describe_vpcs(VpcIds=vpc_ids)
        return [map_ec2vpc_to_vpcinstance(vpc) for vpc in response["Vpcs"]]

    @error_handler
    def describe_vpc(self, vpc_id: str) -> Vpc:
        """Describe a single VPC."""
        return self.describe_vpcs([vpc_id])[0]

    @error_handler
    def list_vpcs(self) -> List[str]:
        """Return the ids of all VPCs in this account/region."""
        all_vpcs = self.client.describe_vpcs()
        return [vpc["VpcId"] for vpc in all_vpcs["Vpcs"]]
/fbpcs/gateway/ecs.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import Any, Dict, List, Optional

import boto3

from fbpcs.decorator.error_handler import error_handler
from fbpcs.entity.cluster_instance import Cluster
from fbpcs.entity.container_instance import ContainerInstance
from fbpcs.mapper.aws import (
    map_ecstask_to_containerinstance,
    map_esccluster_to_clusterinstance,
)


class ECSGateway:
    """Thin wrapper around the boto3 ECS client.

    Public methods are wrapped with @error_handler, which converts botocore
    ClientErrors into PCS errors. Task/cluster API responses are mapped into
    fbpcs entity types via mapper/aws.py.
    """

    def __init__(
        self,
        region: str,
        access_key_id: Optional[str],
        access_key_data: Optional[str],
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.region = region
        # Copy the caller's config: previously the credentials below were
        # inserted into the dict the caller passed in, mutating it.
        config = dict(config) if config else {}
        if access_key_id is not None:
            config["aws_access_key_id"] = access_key_id
        if access_key_data is not None:
            config["aws_secret_access_key"] = access_key_data
        # pyre-ignore
        self.client = boto3.client("ecs", region_name=self.region, **config)

    @error_handler
    def run_task(
        self, task_definition: str, container: str, cmd: str, cluster: str, subnet: str
    ) -> ContainerInstance:
        """Launch one ECS task in `cluster`/`subnet` running `cmd` in `container`."""
        response = self.client.run_task(
            taskDefinition=task_definition,
            cluster=cluster,
            networkConfiguration={
                "awsvpcConfiguration": {
                    "subnets": [subnet],
                    "assignPublicIp": "ENABLED",
                }
            },
            overrides={"containerOverrides": [{"name": container, "command": [cmd]}]},
        )
        return map_ecstask_to_containerinstance(response["tasks"][0])

    @error_handler
    def describe_tasks(self, cluster: str, tasks: List[str]) -> List[ContainerInstance]:
        """Describe multiple tasks in `cluster`, mapped to ContainerInstances."""
        response = self.client.describe_tasks(cluster=cluster, tasks=tasks)
        return [map_ecstask_to_containerinstance(task) for task in response["tasks"]]

    @error_handler
    def describe_task(self, cluster: str, task: str) -> ContainerInstance:
        """Describe a single task."""
        return self.describe_tasks(cluster, [task])[0]

    @error_handler
    def list_tasks(self, cluster: str) -> List[str]:
        """Return the ARNs of tasks in `cluster`."""
        return self.client.list_tasks(cluster=cluster)["taskArns"]

    @error_handler
    def stop_task(self, cluster: str, task_id: str) -> Dict[str, Any]:
        """Stop a running task; returns the raw StopTask response."""
        return self.client.stop_task(
            cluster=cluster,
            task=task_id,
        )

    @error_handler
    def describe_clusters(self, clusters: List[str]) -> List[Cluster]:
        """Describe multiple clusters (tags included), mapped to Cluster entities."""
        response = self.client.describe_clusters(clusters=clusters, include=["TAGS"])
        return [
            map_esccluster_to_clusterinstance(cluster)
            for cluster in response["clusters"]
        ]

    @error_handler
    def describe_cluster(self, cluster: str) -> Cluster:
        """Describe a single cluster."""
        return self.describe_clusters([cluster])[0]

    @error_handler
    def list_clusters(self) -> List[str]:
        """Return the ARNs of all clusters in the account/region."""
        return self.client.list_clusters()["clusterArns"]
/fbpcs/gateway/s3.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import os
from typing import Any, Dict, List, Optional

import boto3
from tqdm.auto import tqdm

from fbpcs.decorator.error_handler import error_handler


class S3Gateway:
    """Thin wrapper around the boto3 S3 client.

    Public methods are wrapped with @error_handler, which converts botocore
    ClientErrors into PCS errors. File transfers report progress through a
    tqdm progress bar.
    """

    def __init__(
        self,
        region: str = "us-west-1",
        access_key_id: Optional[str] = None,
        access_key_data: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.region = region
        # Copy the caller's config: previously the credentials below were
        # inserted into the dict the caller passed in, mutating it.
        config = dict(config) if config else {}
        if access_key_id:
            config["aws_access_key_id"] = access_key_id
        if access_key_data:
            config["aws_secret_access_key"] = access_key_data
        # pyre-ignore
        self.client = boto3.client("s3", region_name=self.region, **config)

    @error_handler
    def create_bucket(self, bucket: str, region: Optional[str] = None) -> None:
        """Create a bucket, defaulting its location to this gateway's region."""
        region = region if region is not None else self.region
        self.client.create_bucket(
            Bucket=bucket, CreateBucketConfiguration={"LocationConstraint": region}
        )

    @error_handler
    def delete_bucket(self, bucket: str) -> None:
        self.client.delete_bucket(Bucket=bucket)

    @error_handler
    def upload_file(self, file_name: str, bucket: str, key: str) -> None:
        """Upload a local file to bucket/key with a progress bar."""
        file_size = os.path.getsize(file_name)
        self.client.upload_file(
            file_name,
            bucket,
            key,
            Callback=self.ProgressPercentage(file_name, file_size),
        )

    @error_handler
    def download_file(self, bucket: str, key: str, file_name: str) -> None:
        """Download bucket/key to a local file with a progress bar."""
        file_size = self.get_object_size(bucket, key)
        self.client.download_file(
            bucket,
            key,
            file_name,
            Callback=self.ProgressPercentage(file_name, file_size),
        )

    @error_handler
    def put_object(self, bucket: str, key: str, data: str) -> None:
        """Write a string as the object body (UTF-8 encoded)."""
        self.client.put_object(Bucket=bucket, Key=key, Body=data.encode())

    @error_handler
    def get_object(self, bucket: str, key: str) -> str:
        """Read the object body and decode it as UTF-8."""
        res = self.client.get_object(Bucket=bucket, Key=key)
        return res["Body"].read().decode()

    @error_handler
    def get_object_size(self, bucket: str, key: str) -> int:
        """Return the object's size in bytes via a HEAD request."""
        return self.client.head_object(Bucket=bucket, Key=key)["ContentLength"]

    @error_handler
    def get_object_info(self, bucket: str, key: str) -> Dict[str, Any]:
        """Return the raw GetObject response (metadata plus streaming body)."""
        return self.client.get_object(Bucket=bucket, Key=key)

    @error_handler
    def list_object2(self, bucket: str, key: str) -> List[str]:
        """List all object keys under the given prefix, across all pages."""
        paginator = self.client.get_paginator("list_objects_v2")
        pages = paginator.paginate(Bucket=bucket, Prefix=key)
        key_list = []
        for page in pages:
            # A page with no matches omits the "Contents" key entirely;
            # indexing it directly raised KeyError for empty prefixes.
            for content in page.get("Contents", []):
                key_list.append(content["Key"])
        return key_list

    @error_handler
    def delete_object(self, bucket: str, key: str) -> None:
        self.client.delete_object(Bucket=bucket, Key=key)

    @error_handler
    def object_exists(self, bucket: str, key: str) -> bool:
        """Return True iff a HEAD request on bucket/key succeeds."""
        try:
            # Result intentionally discarded
            _ = self.client.head_object(Bucket=bucket, Key=key)
            return True
        except Exception:
            return False

    @error_handler
    def copy(
        self, source_bucket: str, source_key: str, dest_bucket: str, dest_key: str
    ) -> None:
        """Server-side copy of one object between S3 locations."""
        source = {"Bucket": source_bucket, "Key": source_key}
        self.client.copy(source, dest_bucket, dest_key)

    class ProgressPercentage(object):
        """boto3 transfer callback that drives a tqdm progress bar."""

        def __init__(self, file_name: str, file_size: int) -> None:
            self._progressbar = tqdm(total=file_size, desc=file_name)

        def __call__(self, bytes_amount: int) -> None:
            self._progressbar.update(bytes_amount)

        def __del__(self) -> None:
            self._progressbar.close()
/fbpcs/mapper/aws.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import Any, Dict, List

from fbpcs.entity.cluster_instance import Cluster, ClusterStatus
from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.entity.vpc_instance import Vpc, VpcState


def map_ecstask_to_containerinstance(task: Dict[str, Any]) -> ContainerInstance:
    """Map a raw ECS DescribeTasks/RunTask task dict to a ContainerInstance.

    Only the first container of the task is inspected. A STOPPED container
    is COMPLETED when its exit code is 0 and FAILED otherwise; any status
    other than RUNNING/STOPPED maps to UNKNOWN.
    """
    container = task["containers"][0]
    ip_v4 = (
        container["networkInterfaces"][0]["privateIpv4Address"]
        if len(container["networkInterfaces"]) > 0
        else None
    )
    status = container["lastStatus"]
    if status == "RUNNING":
        status = ContainerInstanceStatus.STARTED
    elif status == "STOPPED":
        if container["exitCode"] == 0:
            status = ContainerInstanceStatus.COMPLETED
        else:
            status = ContainerInstanceStatus.FAILED
    else:
        status = ContainerInstanceStatus.UNKNOWN

    return ContainerInstance(task["taskArn"], ip_v4, status)


def map_esccluster_to_clusterinstance(cluster: Dict[str, Any]) -> Cluster:
    """Map a raw ECS DescribeClusters cluster dict to a Cluster entity."""
    status = cluster["status"]
    if status == "ACTIVE":
        status = ClusterStatus.ACTIVE
    elif status == "INACTIVE":
        status = ClusterStatus.INACTIVE
    else:
        status = ClusterStatus.UNKNOWN

    tags = _convert_aws_tags_to_dict(cluster["tags"], "key", "value")
    return Cluster(cluster["clusterArn"], cluster["clusterName"], status, tags)


def map_ec2vpc_to_vpcinstance(vpc: Dict[str, Any]) -> Vpc:
    """Map a raw EC2 DescribeVpcs vpc dict to a Vpc entity."""
    state = vpc["State"]
    if state == "pending":
        state = VpcState.PENDING
    elif state == "available":
        state = VpcState.AVAILABLE
    else:
        state = VpcState.UNKNOWN

    vpc_id = vpc["VpcId"]
    # some vpc instances don't have any tags
    tags = (
        _convert_aws_tags_to_dict(vpc["Tags"], "Key", "Value") if "Tags" in vpc else {}
    )
    return Vpc(vpc_id, state, tags)


def _convert_aws_tags_to_dict(
    tag_list: List[Dict[str, str]], tag_key: str, tag_value: str
) -> Dict[str, str]:
    """Flatten AWS's [{key: ..., value: ...}] tag list into a plain dict.

    A later duplicate key overwrites an earlier one, matching the previous
    reduce-based merge — but this comprehension is O(n) instead of O(n^2).
    """
    return {tag[tag_key]: tag[tag_value] for tag in tag_list}
/fbpcs/repository/instance_s3.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import pickle

from fbpcs.entity.instance_base import InstanceBase
from fbpcs.service.storage_s3 import S3StorageService


class S3InstanceRepository:
    """CRUD repository that pickles InstanceBase objects into S3 files.

    Objects are stored at `{base_dir}{instance_id}` as pickle protocol 0
    (ASCII-only) text.
    """

    def __init__(self, s3_storage_svc: S3StorageService, base_dir: str) -> None:
        self.s3_storage_svc = s3_storage_svc
        self.base_dir = base_dir

    def create(self, instance: InstanceBase) -> None:
        """Store a new instance; raises RuntimeError if the id already exists."""
        if self._exist(instance.get_instance_id()):
            raise RuntimeError(f"{instance.get_instance_id()} already exists")
        self._write(instance)

    def read(self, instance_id: str) -> InstanceBase:
        """Load an instance; raises RuntimeError if the id does not exist.

        NOTE(review): pickle.loads executes arbitrary code from the stored
        bytes — safe only while this bucket is fully trusted/write-protected.
        """
        if not self._exist(instance_id):
            raise RuntimeError(f"{instance_id} does not exist")
        instance = pickle.loads(self.s3_storage_svc.read(self._file(instance_id)).encode())
        return instance

    def update(self, instance: InstanceBase) -> None:
        """Overwrite an existing instance; raises RuntimeError if absent."""
        if not self._exist(instance.get_instance_id()):
            raise RuntimeError(f"{instance.get_instance_id()} does not exist")
        self._write(instance)

    def delete(self, instance_id: str) -> None:
        """Delete an instance file; raises RuntimeError if absent."""
        if not self._exist(instance_id):
            raise RuntimeError(f"{instance_id} does not exist")
        self.s3_storage_svc.delete(self._file(instance_id))

    def _file(self, instance_id: str) -> str:
        # Single place that defines the storage path for an instance id.
        return f"{self.base_dir}{instance_id}"

    def _write(self, instance: InstanceBase) -> None:
        # Shared by create/update.
        # Use pickle protocol 0 to make ASCII only bytes that can be safely decoded into a string
        self.s3_storage_svc.write(
            self._file(instance.get_instance_id()), pickle.dumps(instance, 0).decode()
        )

    def _exist(self, instance_id: str) -> bool:
        return self.s3_storage_svc.file_exists(self._file(instance_id))
/fbpcs/repository/mpc_game_repository.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict import abc from fbpcs.entity.mpc_game_config import MPCGameConfig class MPCGameRepository(abc.ABC): @abc.abstractmethod def get_game(self, name: str) -> MPCGameConfig: pass
/fbpcs/repository/mpc_instance_local.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import cast

from fbpcs.entity.mpc_instance import MPCInstance
from fbpcs.repository.instance_local import LocalInstanceRepository
from fbpcs.repository.mpc_instance import MPCInstanceRepository


class LocalMPCInstanceRepository(MPCInstanceRepository):
    """MPCInstanceRepository that delegates all CRUD to a local-disk repo."""

    def __init__(self, base_dir: str) -> None:
        self.repo = LocalInstanceRepository(base_dir)

    def create(self, instance: MPCInstance) -> None:
        """Persist a brand-new MPC instance."""
        self.repo.create(instance)

    def read(self, instance_id: str) -> MPCInstance:
        """Load an MPC instance by id."""
        stored = self.repo.read(instance_id)
        # The underlying repo is typed to InstanceBase; narrow for callers.
        return cast(MPCInstance, stored)

    def update(self, instance: MPCInstance) -> None:
        """Overwrite a previously-stored MPC instance."""
        self.repo.update(instance)

    def delete(self, instance_id: str) -> None:
        """Remove an MPC instance by id."""
        self.repo.delete(instance_id)
/fbpcs/service/container.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict import abc from typing import List from fbpcs.entity.container_instance import ContainerInstance class ContainerService(abc.ABC): @abc.abstractmethod def create_instance(self, container_definition: str, cmd: str) -> ContainerInstance: pass @abc.abstractmethod def create_instances( self, container_definition: str, cmds: List[str] ) -> List[ContainerInstance]: pass @abc.abstractmethod async def create_instances_async( self, container_definition: str, cmds: List[str] ) -> List[ContainerInstance]: pass @abc.abstractmethod def get_instance(self, instance_id: str) -> ContainerInstance: pass @abc.abstractmethod def get_instances(self, instance_ids: List[str]) -> List[ContainerInstance]: pass
/fbpcs/service/container_aws.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict import asyncio from typing import Any, Dict, List, Optional, Tuple from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus from fbpcs.gateway.ecs import ECSGateway from fbpcs.service.container import ContainerService from fbpcs.util.typing import checked_cast class AWSContainerService(ContainerService): def __init__( self, region: str, cluster: str, subnet: str, access_key_id: Optional[str] = None, access_key_data: Optional[str] = None, config: Optional[Dict[str, Any]] = None, ) -> None: self.region = region self.cluster = cluster self.subnet = subnet self.ecs_gateway = ECSGateway(region, access_key_id, access_key_data, config) def create_instance(self, container_definition: str, cmd: str) -> ContainerInstance: return asyncio.run(self._create_instance_async(container_definition, cmd)) def create_instances( self, container_definition: str, cmds: List[str] ) -> List[ContainerInstance]: return asyncio.run(self._create_instances_async(container_definition, cmds)) async def create_instances_async( self, container_definition: str, cmds: List[str] ) -> List[ContainerInstance]: return await self._create_instances_async(container_definition, cmds) def get_instance(self, instance_id: str) -> ContainerInstance: return self.ecs_gateway.describe_task(self.cluster, instance_id) def get_instances(self, instance_ids: List[str]) -> List[ContainerInstance]: return self.ecs_gateway.describe_tasks(self.cluster, instance_ids) def list_tasks(self) -> List[str]: return self.ecs_gateway.list_tasks(cluster=self.cluster) def stop_task(self, task_id: str) -> Dict[str, Any]: return self.ecs_gateway.stop_task(cluster=self.cluster, task_id=task_id) def _split_container_definition(self, container_definition: str) -> Tuple[str, str]: """ 
container_definition = task_definition#container """ s = container_definition.split("#") return (s[0], s[1]) async def _create_instance_async( self, container_definition: str, cmd: str ) -> ContainerInstance: task_definition, container = self._split_container_definition( container_definition ) instance = self.ecs_gateway.run_task( task_definition, container, cmd, self.cluster, self.subnet ) # wait until the container is in running state while instance.status is ContainerInstanceStatus.UNKNOWN: await asyncio.sleep(1) instance = self.get_instance(instance.instance_id) return instance async def _create_instances_async( self, container_definition: str, cmds: List[str] ) -> List[ContainerInstance]: tasks = [ asyncio.create_task(self._create_instance_async(container_definition, cmd)) for cmd in cmds ] res = await asyncio.gather(*tasks) return [checked_cast(ContainerInstance, instance) for instance in res]
/fbpcs/service/log.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict import abc from typing import Any, Dict class LogService(abc.ABC): @abc.abstractmethod def fetch(self, log_path: str) -> Dict[str, Any]: pass
/fbpcs/service/log_cloudwatch.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import Any, Dict, Optional

from fbpcs.gateway.cloudwatch import CloudWatchGateway
from fbpcs.service.log import LogService


class CloudWatchLogService(LogService):
    """LogService implementation reading from one CloudWatch log group."""

    def __init__(
        self,
        log_group: str,
        region: str = "us-west-1",
        access_key_id: Optional[str] = None,
        access_key_data: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.log_group = log_group
        self.cloudwatch_gateway = CloudWatchGateway(
            region, access_key_id, access_key_data, config
        )

    def fetch(self, log_path: str) -> Dict[str, Any]:
        """Fetch logs"""
        # log_path is the log-stream name within this service's log group.
        return self.cloudwatch_gateway.get_log_events(self.log_group, log_path)
/fbpcs/service/mpc.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import asyncio
import logging
from typing import Any, Dict, List, Optional

from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.entity.mpc_instance import MPCInstance, MPCInstanceStatus, MPCRole
from fbpcs.repository.mpc_instance import MPCInstanceRepository
from fbpcs.service.container import ContainerService
from fbpcs.service.mpc_game import MPCGameService
from fbpcs.service.onedocker import OneDockerService
from fbpcs.service.storage import StorageService
from fbpcs.util.typing import checked_cast


class MPCService:
    """MPCService is responsible for distributing a larger MPC game to
    multiple MPC workers
    """

    def __init__(
        self,
        container_svc: ContainerService,
        storage_svc: StorageService,
        instance_repository: MPCInstanceRepository,
        task_definition: str,
        mpc_game_svc: MPCGameService,
    ) -> None:
        """Constructor of MPCService
        Keyword arguments:
        container_svc -- service to spawn container instances
        storage_svc -- service to read/write input/output files
        instance_repository -- repository to CRUD MPCInstance
        task_definition -- containers task definition
        mpc_game_svc -- service to generate package name and game arguments.
        """
        if (
            container_svc is None
            or storage_svc is None
            or instance_repository is None
            or mpc_game_svc is None
        ):
            raise ValueError(
                f"Dependency is missing. container_svc={container_svc}, mpc_game_svc={mpc_game_svc}, "
                f"storage_svc={storage_svc}, instance_repository={instance_repository}"
            )

        self.container_svc = container_svc
        self.storage_svc = storage_svc
        self.instance_repository = instance_repository
        self.task_definition = task_definition
        self.mpc_game_svc: MPCGameService = mpc_game_svc
        self.logger: logging.Logger = logging.getLogger(__name__)
        # Containers are always launched through OneDocker on top of the
        # injected container service.
        self.onedocker_svc = OneDockerService(self.container_svc)

    """
    The game_args should be consistent with the game_config, which should be
    defined in caller's game repository.

    For example,
    If the game config looks like this:

    game_config = {
    "game": {
        "one_docker_package_name": "package_name",
        "arguments": [
            {"name": "input_filenames", "required": True},
            {"name": "input_directory", "required": True},
            {"name": "output_filenames", "required": True},
            {"name": "output_directory", "required": True},
            {"name": "concurrency", "required": True},
        ],
    },

    The game args should look like this:
    [
        # 1st container
        {
            "input_filenames": input_path_1,
            "input_directory": input_directory,
            "output_filenames": output_path_1,
            "output_directory": output_directory,
            "concurrency": cocurrency,
        },
        # 2nd container
        {
            "input_filenames": input_path_2,
            "input_directory": input_directory,
            "output_filenames": output_path_2,
            "output_directory": output_directory,
            "concurrency": cocurrency,
        },
    ]
    """

    def create_instance(
        self,
        instance_id: str,
        game_name: str,
        mpc_role: MPCRole,
        num_workers: int,
        server_ips: Optional[List[str]] = None,
        game_args: Optional[List[Dict[str, Any]]] = None,
    ) -> MPCInstance:
        """Create and persist a new MPCInstance in CREATED state.

        No containers are launched here; that happens in start_instance.
        """
        self.logger.info(f"Creating MPC instance: {instance_id}")

        instance = MPCInstance(
            instance_id=instance_id,
            game_name=game_name,
            mpc_role=mpc_role,
            num_workers=num_workers,
            server_ips=server_ips,
            status=MPCInstanceStatus.CREATED,
            game_args=game_args,
        )

        self.instance_repository.create(instance)
        return instance

    def start_instance(
        self,
        instance_id: str,
        output_files: Optional[List[str]] = None,
        server_ips: Optional[List[str]] = None,
        timeout: Optional[int] = None,
    ) -> MPCInstance:
        """Synchronous wrapper around start_instance_async."""
        return asyncio.run(
            self.start_instance_async(instance_id, output_files, server_ips, timeout)
        )

    async def start_instance_async(
        self,
        instance_id: str,
        output_files: Optional[List[str]] = None,
        server_ips: Optional[List[str]] = None,
        timeout: Optional[int] = None,
    ) -> MPCInstance:
        """To run a distributed MPC game
        Keyword arguments:
        instance_id -- unique id to identify the MPC instance
        """
        instance = self.instance_repository.read(instance_id)
        self.logger.info(f"Starting MPC instance: {instance_id}")

        # A CLIENT must be told which servers to connect to.
        if instance.mpc_role is MPCRole.CLIENT and not server_ips:
            raise ValueError("Missing server_ips")

        # spin up containers
        self.logger.info("Spinning up container instances")
        game_args = instance.game_args
        instance.containers = await self._spin_up_containers_onedocker(
            instance.game_name,
            instance.mpc_role,
            instance.num_workers,
            game_args,
            server_ips,
            timeout,
        )

        if len(instance.containers) != instance.num_workers:
            self.logger.warning(
                f"Instance {instance_id} has {len(instance.containers)} containers spun up, but expecting {instance.num_workers} containers!"
            )

        if instance.mpc_role is MPCRole.SERVER:
            # Record the server containers' IPs so clients can reach them.
            ip_addresses = [
                checked_cast(str, instance.ip_address)
                for instance in instance.containers
            ]
            instance.server_ips = ip_addresses

        instance.status = MPCInstanceStatus.STARTED
        self.instance_repository.update(instance)

        return instance

    def get_instance(self, instance_id: str) -> MPCInstance:
        """Load an MPC instance from the repository without refreshing it."""
        self.logger.info(f"Getting MPC instance: {instance_id}")
        return self.instance_repository.read(instance_id)

    def update_instance(self, instance_id: str) -> MPCInstance:
        """Refresh container statuses and recompute the instance status.

        Terminal instances (COMPLETED/FAILED) are returned unchanged.
        """
        instance = self.instance_repository.read(instance_id)

        self.logger.info(f"Updating MPC instance: {instance_id}")

        if instance.status in [MPCInstanceStatus.COMPLETED, MPCInstanceStatus.FAILED]:
            return instance

        # skip if no containers registered under instance yet
        if instance.containers:
            instance.containers = self._update_container_instances(instance.containers)

            if len(instance.containers) != instance.num_workers:
                self.logger.warning(
                    f"Instance {instance_id} has {len(instance.containers)} containers after update, but expecting {instance.num_workers} containers!"
                )

            instance.status = self._get_instance_status(instance)

            self.instance_repository.update(instance)

        return instance

    async def _spin_up_containers_onedocker(
        self,
        game_name: str,
        mpc_role: MPCRole,
        num_containers: int,
        game_args: Optional[List[Dict[str, Any]]] = None,
        ip_addresses: Optional[List[str]] = None,
        timeout: Optional[int] = None,
    ) -> List[ContainerInstance]:
        """Build per-container OneDocker commands and launch them all.

        game_args/ip_addresses, when supplied, must have exactly one entry
        per container (entry i is passed to container i).
        """
        if game_args is not None and len(game_args) != num_containers:
            raise ValueError(
                "The number of containers is not consistent with the number of game argument dictionary."
            )
        if ip_addresses is not None and len(ip_addresses) != num_containers:
            raise ValueError(
                "The number of containers is not consistent with number of ip addresses."
            )
        cmd_tuple_list = []
        for i in range(num_containers):
            game_arg = game_args[i] if game_args is not None else {}
            server_ip = ip_addresses[i] if ip_addresses is not None else None
            cmd_tuple_list.append(
                self.mpc_game_svc.build_one_docker_args(
                    game_name=game_name,
                    mpc_role=mpc_role,
                    server_ip=server_ip,
                    **game_arg,
                )
            )
        cmd_args_list = [cmd_args for (package_name, cmd_args) in cmd_tuple_list]
        # All containers run the same package, so the first tuple's package
        # name is used for the whole batch.
        return await self.onedocker_svc.start_containers_async(
            self.task_definition, cmd_tuple_list[0][0], cmd_args_list, timeout
        )

    def _update_container_instances(
        self, containers: List[ContainerInstance]
    ) -> List[ContainerInstance]:
        """Re-query the container service for fresh container states."""
        ids = [container.instance_id for container in containers]
        return self.container_svc.get_instances(ids)

    def _get_instance_status(self, instance: MPCInstance) -> MPCInstanceStatus:
        """Aggregate container statuses: FAILED/UNKNOWN dominate, then
        STARTED; COMPLETED only when every container completed."""
        status = MPCInstanceStatus.COMPLETED

        for container in instance.containers:
            if container.status == ContainerInstanceStatus.FAILED:
                return MPCInstanceStatus.FAILED
            if container.status == ContainerInstanceStatus.UNKNOWN:
                return MPCInstanceStatus.UNKNOWN
            if container.status == ContainerInstanceStatus.STARTED:
                status = MPCInstanceStatus.STARTED

        return status
/fbpcs/service/onedocker.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import asyncio
import logging
from typing import List, Optional

from fbpcs.entity.container_instance import ContainerInstance
from fbpcs.service.container import ContainerService


# Command template for the in-container runner; {0} is the package name.
ONE_DOCKER_CMD_PREFIX = (
    # patternlint-disable-next-line f-string-may-be-missing-leading-f
    "python3.8 -m one_docker_runner --package_name={0} --cmd='/root/one_docker/package/"
)


class OneDockerService:
    """OneDockerService is responsible for executing executable(s) in a Fargate container"""

    def __init__(self, container_svc: ContainerService) -> None:
        """Constructor of OneDockerService
        container_svc -- service to spawn container instances
        TODO: log_svc -- service to read cloudwatch logs
        """
        if container_svc is None:
            raise ValueError(f"Dependency is missing. container_svc={container_svc}, ")

        self.container_svc = container_svc
        self.logger: logging.Logger = logging.getLogger(__name__)

    def start_container(
        self,
        container_definition: str,
        package_name: str,
        cmd_args: str,
        timeout: Optional[int] = None,
    ) -> ContainerInstance:
        """Start one container running the packaged executable with cmd_args."""
        # TODO: ContainerInstance mapper
        return asyncio.run(
            self.start_containers_async(
                container_definition, package_name, [cmd_args], timeout
            )
        )[0]

    def start_containers(
        self,
        container_definition: str,
        package_name: str,
        cmd_args_list: List[str],
        timeout: Optional[int] = None,
    ) -> List[ContainerInstance]:
        """Synchronous wrapper around start_containers_async."""
        return asyncio.run(
            self.start_containers_async(
                container_definition, package_name, cmd_args_list, timeout
            )
        )

    async def start_containers_async(
        self,
        container_definition: str,
        package_name: str,
        cmd_args_list: List[str],
        timeout: Optional[int] = None,
    ) -> List[ContainerInstance]:
        """Asynchronously spin up one container per element in input command list."""
        cmds = [
            self._get_cmd(package_name, cmd_args, timeout) for cmd_args in cmd_args_list
        ]
        self.logger.info("Spinning up container instances")
        container_ids = await self.container_svc.create_instances_async(
            container_definition, cmds
        )
        return container_ids

    def _get_exe_name(self, package_name: str) -> str:
        """Extract the executable name from the package name.

        NOTE(review): uses split("/")[1], which assumes package_name has
        exactly the form "<repo>/<exe>"; a deeper path would return the
        middle segment — confirm the expected format.
        """
        return package_name.split("/")[1]

    def _get_cmd(
        self, package_name: str, cmd_args: str, timeout: Optional[int] = None
    ) -> str:
        """Assemble the full one_docker_runner shell command string."""
        cmd_timeout = ""
        """ If we passed --timeout=None, the schema module will raise error,
        since f-string converts None to "None" and schema treats None
        in --timeout=None as a string
        """
        if timeout is not None:
            cmd_timeout = f" --timeout={timeout}"
        # NOTE(review): ONE_DOCKER_CMD_PREFIX only interpolates {0}; the
        # second argument (timeout) passed to .format below is ignored —
        # confirm whether that is intentional.
        return f"{ONE_DOCKER_CMD_PREFIX.format(package_name, timeout)}{self._get_exe_name(package_name)} {cmd_args}'{cmd_timeout}"
/fbpcs/service/storage.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import abc
from enum import Enum


class PathType(Enum):
    # Classification of a storage path: local filesystem vs S3 object URL.
    Local = 1
    S3 = 2


class StorageService(abc.ABC):
    """Abstract interface every storage backend must implement."""

    @abc.abstractmethod
    def read(self, filename: str) -> str:
        """Return the full contents of ``filename`` as a string."""
        ...

    @abc.abstractmethod
    def write(self, filename: str, data: str) -> None:
        """Write ``data`` to ``filename``, replacing any existing content."""
        ...

    @abc.abstractmethod
    def copy(self, source: str, destination: str) -> None:
        """Copy ``source`` to ``destination``."""
        ...

    @abc.abstractmethod
    def file_exists(self, filename: str) -> bool:
        """Return True when ``filename`` exists on the backend."""
        ...

    @staticmethod
    def path_type(filename: str) -> PathType:
        """Classify ``filename``: https-prefixed paths are S3, all else local."""
        return PathType.S3 if filename.startswith("https:") else PathType.Local

    @abc.abstractmethod
    def get_file_size(self, filename: str) -> int:
        """Return the size of ``filename`` in bytes."""
        ...
/fbpcs/service/storage_s3.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import os
from os import path
from os.path import join, normpath, relpath
from typing import Any, Dict, Optional

from fbpcs.gateway.s3 import S3Gateway
from fbpcs.service.storage import PathType, StorageService
from fbpcs.util.s3path import S3Path


class S3StorageService(StorageService):
    """StorageService implementation backed by AWS S3 (virtual-host-style URLs)."""

    def __init__(
        self,
        region: str = "us-west-1",
        access_key_id: Optional[str] = None,
        access_key_data: Optional[str] = None,
        config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.s3_gateway = S3Gateway(region, access_key_id, access_key_data, config)

    def read(self, filename: str) -> str:
        """Read a file data

        Keyword arguments:
        filename -- "https://bucket-name.s3.Region.amazonaws.com/key-name"
        """
        s3_path = S3Path(filename)
        return self.s3_gateway.get_object(s3_path.bucket, s3_path.key)

    def write(self, filename: str, data: str) -> None:
        """Write data into a file

        Keyword arguments:
        filename -- "https://bucket-name.s3.Region.amazonaws.com/key-name"`
        """
        s3_path = S3Path(filename)
        self.s3_gateway.put_object(s3_path.bucket, s3_path.key, data)

    def copy(self, source: str, destination: str, recursive: bool = False) -> None:
        """Move a file or folder between local storage and S3, as well as, S3 and S3

        Keyword arguments:
        source -- source file
        destination -- destination file
        recursive -- whether to recursively copy a folder
        """
        if StorageService.path_type(source) == PathType.Local:
            # from local to S3
            if StorageService.path_type(destination) == PathType.Local:
                raise ValueError("Both source and destination are local files")
            s3_path = S3Path(destination)
            if path.isdir(source):
                if not recursive:
                    raise ValueError(f"Source {source} is a folder. Use --recursive")
                self.upload_dir(source, s3_path.bucket, s3_path.key)
            else:
                self.s3_gateway.upload_file(source, s3_path.bucket, s3_path.key)
        else:
            source_s3_path = S3Path(source)
            if StorageService.path_type(destination) == PathType.S3:
                # from S3 to S3
                dest_s3_path = S3Path(destination)
                if source_s3_path == dest_s3_path:
                    raise ValueError(
                        f"Source {source} and destination {destination} are the same"
                    )
                # A trailing slash marks an S3 "folder" (key prefix).
                if source.endswith("/"):
                    if not recursive:
                        raise ValueError(
                            f"Source {source} is a folder. Use --recursive"
                        )
                    self.copy_dir(
                        source_s3_path.bucket,
                        source_s3_path.key + "/",
                        dest_s3_path.bucket,
                        dest_s3_path.key,
                    )
                else:
                    self.s3_gateway.copy(
                        source_s3_path.bucket,
                        source_s3_path.key,
                        dest_s3_path.bucket,
                        dest_s3_path.key,
                    )
            else:
                # from S3 to local
                if source.endswith("/"):
                    if not recursive:
                        raise ValueError(
                            f"Source {source} is a folder. Use --recursive"
                        )
                    self.download_dir(
                        source_s3_path.bucket,
                        source_s3_path.key + "/",
                        destination,
                    )
                else:
                    self.s3_gateway.download_file(
                        source_s3_path.bucket, source_s3_path.key, destination
                    )

    def upload_dir(self, source: str, s3_path_bucket: str, s3_path_key: str) -> None:
        """Recursively upload the local folder ``source`` under the given key prefix."""
        for root, dirs, files in os.walk(source):
            for file in files:
                local_path = join(root, file)
                destination_path = s3_path_key + "/" + relpath(local_path, source)
                self.s3_gateway.upload_file(
                    local_path,
                    s3_path_bucket,
                    destination_path,
                )
            for dir in dirs:
                local_path = join(root, dir)
                destination_path = s3_path_key + "/" + relpath(local_path, source)
                # Empty object whose key ends in "/" represents the folder itself.
                self.s3_gateway.put_object(
                    s3_path_bucket,
                    destination_path + "/",
                    "",
                )

    def download_dir(
        self, s3_path_bucket: str, s3_path_key: str, destination: str
    ) -> None:
        """Recursively download every object under the key prefix into ``destination``."""
        if not self.s3_gateway.object_exists(s3_path_bucket, s3_path_key):
            raise ValueError(
                f"Key {s3_path_key} does not exist in bucket {s3_path_bucket}"
            )
        keys = self.s3_gateway.list_object2(s3_path_bucket, s3_path_key)
        for key in keys:
            local_path = normpath(destination + "/" + key[len(s3_path_key) :])
            if key.endswith("/"):
                # Folder placeholder object -> create the local directory.
                if not path.exists(local_path):
                    os.makedirs(local_path)
            else:
                self.s3_gateway.download_file(s3_path_bucket, key, local_path)

    def copy_dir(
        self,
        source_bucket: str,
        source_key: str,
        destination_bucket: str,
        destination_key: str,
    ) -> None:
        """Recursively copy every object under ``source_key`` to the destination prefix."""
        if not self.s3_gateway.object_exists(source_bucket, source_key):
            raise ValueError(
                f"Key {source_key} does not exist in bucket {source_bucket}"
            )
        keys = self.s3_gateway.list_object2(source_bucket, source_key)
        for key in keys:
            destination_path = destination_key + "/" + key[len(source_key) :]
            if key.endswith("/"):
                self.s3_gateway.put_object(
                    source_bucket,
                    destination_path,
                    "",
                )
            else:
                self.s3_gateway.copy(
                    source_bucket,
                    key,
                    destination_bucket,
                    destination_path,
                )

    def delete(self, filename: str) -> None:
        """Delete an s3 file

        Keyword arguments:
        filename -- the s3 file to be deleted
        """
        if StorageService.path_type(filename) == PathType.S3:
            s3_path = S3Path(filename)
            self.s3_gateway.delete_object(s3_path.bucket, s3_path.key)
        else:
            raise ValueError("The file is not an s3 file")

    def file_exists(self, filename: str) -> bool:
        """Return True when ``filename`` (an S3 URL) exists; raise for non-S3 paths."""
        if StorageService.path_type(filename) == PathType.S3:
            s3_path = S3Path(filename)
            return self.s3_gateway.object_exists(s3_path.bucket, s3_path.key)
        else:
            # Fix: the f-string previously had no placeholder, so the offending
            # path never appeared in the error message.
            raise ValueError(f"File {filename} is not an S3 filepath")

    def ls_file(self, filename: str) -> Dict[str, Any]:
        """Show file information (last modified time, type and size)

        Keyword arguments:
        filename -- the s3 file to be shown
        """
        s3_path = S3Path(filename)
        return self.s3_gateway.get_object_info(s3_path.bucket, s3_path.key)

    def get_file_size(self, filename: str) -> int:
        """Return the object size in bytes."""
        s3_path = S3Path(filename)
        return self.s3_gateway.get_object_size(s3_path.bucket, s3_path.key)
/fbpcs/util/reflect.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from importlib import import_module
from typing import Any


# pyre-ignore
def get_class(class_path: str) -> Any:
    """Resolve a dotted ``package.module.Name`` path to the named attribute.

    Splits on the last dot, imports the module part, and returns the
    attribute named by the final component.
    """
    module_path, attr_name = class_path.rsplit(".", 1)
    return getattr(import_module(module_path), attr_name)
/fbpcs/util/s3path.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import re
from typing import Tuple

# virtual host style url
# https://bucket-name.s3.Region.amazonaws.com/key-name
# Fix: dots are now escaped (previously `.` matched any character, so hosts
# like "bucketXs3Yregion.amazonaws.com" parsed) and the scheme requires the
# full "//" (previously "https:/bucket..." was accepted).
_S3_URL_RE = re.compile(r"^https?://([^./]+)\.s3\.([^./]+)\.amazonaws\.com/(.*)$")


class S3Path:
    # Parsed components of a virtual-host-style S3 URL.
    region: str
    bucket: str
    key: str

    def __init__(self, fileURL: str) -> None:
        """Parse ``fileURL`` into region/bucket/key; raise ValueError if malformed."""
        self.region, self.bucket, self.key = self._get_region_bucket_key(fileURL)

    def __eq__(self, other: object) -> bool:
        # Fix: previously comparing against a non-S3Path raised AttributeError.
        if not isinstance(other, S3Path):
            return NotImplemented
        return (
            self.region == other.region
            and self.bucket == other.bucket
            and self.key == other.key
        )

    def _get_region_bucket_key(self, fileURL: str) -> Tuple[str, str, str]:
        """Return (region, bucket, key) extracted from a virtual-host-style URL."""
        match = _S3_URL_RE.search(fileURL)
        if not match:
            raise ValueError(f"Could not parse {fileURL} as an S3Path")
        # Keep the historical strip("/") on the key so trailing-slash URLs
        # (e.g. repository roots) still yield a slash-free key.
        bucket, region, key = (
            match.group(1),
            match.group(2),
            match.group(3).strip("/"),
        )
        return (region, bucket, key)
/fbpcs/util/typing.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from typing import Type, TypeVar

T = TypeVar("T")
V = TypeVar("V")


# pyre-fixme[34]: `T` isn't present in the function's parameters.
def checked_cast(typ: Type[V], val: V) -> T:
    """Return ``val`` unchanged after asserting it is an instance of ``typ``.

    Raises:
        ValueError: if ``val`` is not an instance of ``typ``.
    """
    if not isinstance(val, typ):
        # Fix: the message previously interpolated the builtin `type`
        # ("<class 'type'>") instead of the requested class `typ`.
        raise ValueError(f"Value was not of type {typ!r}:\n{val!r}")
    # pyre-fixme[7]: Expected `T` but got `V`.
    return val
/fbpcs/util/yaml.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

from pathlib import Path
from typing import Any, Dict

import yaml


def load(file_path: Path) -> Dict[str, Any]:
    """Parse the YAML document at ``file_path`` and return it as a dict."""
    with open(file_path) as fh:
        return yaml.safe_load(fh)


# pyre-ignore
def dump(data: Any, file_path: Path) -> None:
    """Serialize ``data`` as YAML into ``file_path`` (overwriting it)."""
    with open(file_path, "w") as fh:
        return yaml.dump(data, fh)
/onedocker/env.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # This is the repository path that OneDocker downloads binaries from ONEDOCKER_REPOSITORY_PATH = "ONEDOCKER_REPOSITORY_PATH" # This is the local path that the binaries reside ONEDOCKER_EXE_PATH = "ONEDOCKER_EXE_PATH"
/onedocker/onedocker_runner.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


"""
CLI for running an executable in one docker


Usage:
    onedocker-runner <package_name> --cmd=<cmd> [options]

Options:
    -h --help                            Show this help
    --repository_path=<repository_path>  The folder repository that the executables are to downloaded from
    --exe_path=<exe_path>                The folder that the executables are located at
    --timeout=<timeout>                  Set timeout (in sec) to task to avoid endless running
    --log_path=<path>                    Override the default path where logs are saved
    --verbose                            Set logging level to DEBUG
"""

import logging
import os
import subprocess
import sys
from pathlib import Path
from typing import Any, Optional, Tuple

import psutil
import schema
from docopt import docopt
from env import ONEDOCKER_EXE_PATH, ONEDOCKER_REPOSITORY_PATH
from fbpcs.service.storage_s3 import S3StorageService
from fbpcs.util.s3path import S3Path
from util import run_cmd

# the folder on s3 that the executables are to downloaded from
DEFAULT_REPOSITORY_PATH = "https://one-docker-repository.s3.us-west-1.amazonaws.com/"

# the folder in the docker image that is going to host the executables
DEFAULT_EXE_FOLDER = "/root/one_docker/package/"


def run(
    repository_path: str,
    exe_path: str,
    package_name: str,
    cmd: str,
    logger: logging.Logger,
    timeout: Optional[int],  # fix: may legitimately be None (no --timeout given)
) -> None:
    """Download the packaged executable (unless local), make it executable,
    run ``cmd``, log network usage, and exit with the subprocess return code."""
    # download executable from s3
    if repository_path.upper() != "LOCAL":
        logger.info("Downloading executables ...")
        _download_executables(repository_path, package_name)
    else:
        logger.info("Local repository, skip download ...")

    # grant execute permission to the downloaded executable file
    _, exe_name = _parse_package_name(package_name)
    # argument-list form avoids interpolating the path into a shell string
    subprocess.run(["chmod", "+x", f"{exe_path}/{exe_name}"])

    # TODO update this line after proper change in
    # fbcode/measurement/private_measurement/pcs/oss/fbpcs/service/onedocker.py
    # to take out the hard coded exe_path in cmd string
    if repository_path.upper() == "LOCAL":
        cmd = exe_path + cmd

    # run execution cmd
    logger.info(f"Running cmd: {cmd} ...")
    net_start: Any = psutil.net_io_counters()

    return_code = run_cmd(cmd, timeout)
    if return_code != 0:
        logger.info(f"Subprocess returned non-zero return code: {return_code}")

    net_end: Any = psutil.net_io_counters()
    logger.info(
        f"Net usage: {net_end.bytes_sent - net_start.bytes_sent} bytes sent, "
        f"{net_end.bytes_recv - net_start.bytes_recv} bytes received"
    )
    sys.exit(return_code)


def _download_executables(
    repository_path: str,
    package_name: str,
) -> None:
    """Copy the package's binary from the S3 repository into DEFAULT_EXE_FOLDER."""
    s3_region = S3Path(repository_path).region
    _, exe_name = _parse_package_name(package_name)
    exe_local_path = DEFAULT_EXE_FOLDER + exe_name
    exe_s3_path = repository_path + package_name
    storage_svc = S3StorageService(s3_region)
    storage_svc.copy(exe_s3_path, exe_local_path)


def _parse_package_name(package_name: str) -> Tuple[str, str]:
    """Split a "team/exe" package name into (team, exe_name)."""
    # split once instead of twice
    parts = package_name.split("/")
    return parts[0], parts[1]


def _read_config(
    logger: logging.Logger,
    config_name: str,
    argument: Optional[str],
    env_var: str,
    default_val: str,
) -> str:
    """Resolve a config value: program argument > environment variable > default."""
    if argument:
        logger.info(f"Read {config_name} from program arguments...")
        return argument
    # fix: read the environment variable once instead of twice
    env_val = os.getenv(env_var)
    if env_val:
        logger.info(f"Read {config_name} from environment variables...")
        return env_val
    logger.info(f"Read {config_name} from default value...")
    return default_val


def main() -> None:
    """Validate CLI arguments, configure logging, and invoke run()."""
    s = schema.Schema(
        {
            "<package_name>": str,
            "--cmd": schema.Or(None, str),
            "--repository_path": schema.Or(None, schema.And(str, len)),
            "--exe_path": schema.Or(None, schema.And(str, len)),
            "--timeout": schema.Or(None, schema.Use(int)),
            "--log_path": schema.Or(None, schema.Use(Path)),
            "--verbose": bool,
            "--help": bool,
        }
    )
    arguments = s.validate(docopt(__doc__))

    log_path = arguments["--log_path"]
    log_level = logging.DEBUG if arguments["--verbose"] else logging.INFO
    logging.basicConfig(filename=log_path, level=log_level)
    logger = logging.getLogger(__name__)

    # timeout could be None if the caller did not provide the value
    timeout = arguments["--timeout"]

    repository_path = _read_config(
        logger,
        "repository_path",
        arguments["--repository_path"],
        ONEDOCKER_REPOSITORY_PATH,
        DEFAULT_REPOSITORY_PATH,
    )
    exe_path = _read_config(
        logger,
        "exe_path",
        arguments["--exe_path"],
        ONEDOCKER_EXE_PATH,
        DEFAULT_EXE_FOLDER,
    )

    logger.info("Starting program....")
    try:
        run(
            repository_path=repository_path,
            exe_path=exe_path,
            package_name=arguments["<package_name>"],
            cmd=arguments["--cmd"],
            logger=logger,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        logger.error(f"{timeout} seconds have passed. Now exiting the program....")
        sys.exit(1)
    except InterruptedError:
        logger.error("Receive abort command from user, Now exiting the program....")
        sys.exit(1)


if __name__ == "__main__":
    main()
/onedocker/tests/test_util.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import subprocess import unittest from util import run_cmd class TestUtil(unittest.TestCase): def test_run_cmd(self): self.assertEqual(0, run_cmd("cat", 1)) def test_run_cmd_with_timeout(self): self.assertRaises(subprocess.TimeoutExpired, run_cmd, "vi", 1)
/onedocker/util.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import signal
import subprocess
from typing import Optional


def run_cmd(cmd: str, timeout: Optional[int]) -> int:
    """Run ``cmd`` through the shell and return its exit code.

    A SIGINT handler is installed so Ctrl+C from the user's terminal raises
    InterruptedError; on timeout or interruption the entire process group
    spawned for ``cmd`` is terminated and the triggering exception re-raised.
    """

    def _on_sigint(signum, frame):
        # Translate Ctrl+C into a catchable InterruptedError.
        raise InterruptedError

    signal.signal(signal.SIGINT, _on_sigint)

    # start_new_session=True makes the child call setsid() before exec, so the
    # shell and all of its descendants share one process group (pgid == child
    # pid) and the OS can kill the whole tree if the timeout fires.
    with subprocess.Popen(cmd, shell=True, start_new_session=True) as proc:
        try:
            proc.communicate(timeout=timeout)
        except (subprocess.TimeoutExpired, InterruptedError) as exc:
            proc.terminate()
            os.killpg(proc.pid, signal.SIGTERM)
            raise exc
        return proc.wait()
/setup.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from setuptools import setup, find_packages install_requires = [ "boto3==1.11.11", "dataclasses-json==0.5.2", "pyyaml==5.4.1", "tqdm==4.55.1", ] with open("README.md", encoding="utf-8") as f: long_description = f.read() setup( name="fbpcs", version="0.1.0", description="Facebook Private Computation Service", author="Facebook", author_email="researchtool-help@fb.com", url="https://github.com/facebookresearch/FBPCS", install_requires=install_requires, packages=find_packages(), long_description_content_type="text/markdown", long_description=long_description, python_requires=">=3.8", )
/tests/decorator/test_error_handler.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from botocore.exceptions import ClientError

from fbpcs.decorator.error_handler import error_handler
from fbpcs.error.pcs import PcsError
from fbpcs.error.throttling import ThrottlingError


class TestErrorHandler(unittest.TestCase):
    def test_pcs_error(self):
        # A generic exception raised inside the wrapped function is mapped to PcsError.
        @error_handler
        def foo():
            raise ValueError("just a test")

        self.assertRaises(PcsError, foo)

    def test_throttling_error(self):
        # An AWS throttling ClientError is mapped to the more specific ThrottlingError.
        @error_handler
        def foo():
            err = ClientError(
                {
                    "Error": {
                        "Code": "ThrottlingException",
                        "Message": "test",
                    },
                },
                "test",
            )
            raise err

        self.assertRaises(ThrottlingError, foo)

    def test_wrapped_function_args(self):
        # Fix: this test previously declared foo(**kwargs) while passing
        # positional arguments (the args/kwargs bodies were swapped);
        # positional arguments must flow through the decorator unchanged.
        @error_handler
        def foo(*args):
            raise ValueError("just a test f")

        self.assertRaises(PcsError, foo, "error1", "error2")

    def test_wrapped_function_kwargs(self):
        # Fix: counterpart of the swap above — keyword arguments must flow
        # through the decorator unchanged.
        @error_handler
        def foo(**kwargs):
            raise ValueError("just a test")

        self.assertRaises(
            PcsError,
            foo,
            error_type1="error_msg1",
            error_type2="error_msg2",
        )
/tests/error/mapper/test_aws.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from botocore.exceptions import ClientError

from fbpcs.error.mapper.aws import map_aws_error
from fbpcs.error.pcs import PcsError
from fbpcs.error.throttling import ThrottlingError


class TestMapAwsError(unittest.TestCase):
    @staticmethod
    def _client_error(code: str) -> ClientError:
        # Build a minimal botocore ClientError carrying the given error code.
        return ClientError(
            {
                "Error": {
                    "Code": code,
                    "Message": "test",
                },
            },
            "test",
        )

    def test_pcs_error(self):
        # A generic AWS error maps to the base PcsError.
        mapped = map_aws_error(self._client_error("Exception"))
        self.assertIsInstance(mapped, PcsError)

    def test_throttling_error(self):
        # A throttling error maps to the more specific ThrottlingError.
        mapped = map_aws_error(self._client_error("ThrottlingException"))
        self.assertIsInstance(mapped, ThrottlingError)
/tests/gateway/test_cloudwatch.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest.mock import MagicMock, patch

from fbpcs.gateway.cloudwatch import CloudWatchGateway


class TestCloudWatchGateway(unittest.TestCase):
    REGION = "us-west-1"
    GROUP_NAME = "test-group-name"
    STREAM_NAME = "test-stream-name"

    @patch("boto3.client")
    def test_get_log_events(self, BotoClient):
        # Replace the real boto3 client with a mock and verify the gateway
        # forwards the stubbed response untouched.
        expected_log = {"test-events": [{"test-event-name": "test-event-data"}]}
        gw = CloudWatchGateway(self.REGION)
        gw.client = BotoClient()
        gw.client.get_log_events = MagicMock(return_value=expected_log)

        returned_log = gw.get_log_events(self.GROUP_NAME, self.STREAM_NAME)

        gw.client.get_log_events.assert_called()
        self.assertEqual(expected_log, returned_log)
/tests/gateway/test_ec2.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from unittest.mock import MagicMock, patch from fbpcs.entity.vpc_instance import Vpc, VpcState from fbpcs.gateway.ec2 import EC2Gateway TEST_VPC_ID = "test-vpc-id" TEST_ACCESS_KEY_ID = "test-access-key-id" TEST_ACCESS_KEY_DATA = "test-access-key-data" TEST_VPC_TAG_KEY = "test-vpc-tag-key" TEST_VPC_TAG_VALUE = "test-vpc-tag-value" REGION = "us-west-2" class TestEC2Gateway(unittest.TestCase): @patch("boto3.client") def setUp(self, BotoClient): self.gw = EC2Gateway(REGION, TEST_ACCESS_KEY_ID, TEST_ACCESS_KEY_DATA) self.gw.client = BotoClient() def test_describe_vpcs(self): client_return_response = { "Vpcs": [ { "State": "UNKNOWN", "VpcId": TEST_VPC_ID, "Tags": [ { "Key": TEST_VPC_TAG_KEY, "Value": TEST_VPC_TAG_VALUE, }, ], } ] } tags = {TEST_VPC_TAG_KEY: TEST_VPC_TAG_VALUE} self.gw.client.describe_vpcs = MagicMock(return_value=client_return_response) vpcs = self.gw.describe_vpcs([TEST_VPC_ID]) expected_vpcs = [ Vpc( TEST_VPC_ID, VpcState.UNKNOWN, tags, ), ] self.assertEqual(vpcs, expected_vpcs) self.gw.client.describe_vpcs.assert_called() def test_list_vpcs(self): client_return_response = { "Vpcs": [ {"VpcId": TEST_VPC_ID}, ] } self.gw.client.describe_vpcs = MagicMock(return_value=client_return_response) vpcs = self.gw.list_vpcs() expected_vpcs = [TEST_VPC_ID] self.assertEqual(vpcs, expected_vpcs) self.gw.client.describe_vpcs.assert_called()
/tests/gateway/test_ecs.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest.mock import MagicMock, patch

from fbpcs.entity.cluster_instance import ClusterStatus, Cluster
from fbpcs.entity.container_instance import ContainerInstanceStatus, ContainerInstance
from fbpcs.gateway.ecs import ECSGateway


class TestECSGateway(unittest.TestCase):
    TEST_TASK_ARN = "test-task-arn"
    TEST_TASK_DEFINITION = "test-task-definition"
    TEST_CONTAINER = "test-container"
    TEST_CLUSTER = "test-cluster"
    TEST_CMD = "test-cmd"
    TEST_SUBNET = "test-subnet"
    TEST_ACCESS_KEY_ID = "test-access-key-id"
    TEST_ACCESS_KEY_DATA = "test-access-key-data"
    TEST_IP_ADDRESS = "127.0.0.1"
    TEST_FILE = "test-file"
    TEST_CLUSTER_TAG_KEY = "test-tag-key"
    TEST_CLUSTER_TAG_VALUE = "test-tag-value"
    REGION = "us-west-2"

    # boto3.client is patched for setUp so no real AWS client is created; the
    # gateway's client is replaced with the mock for per-test stubbing.
    @patch("boto3.client")
    def setUp(self, BotoClient):
        self.gw = ECSGateway(
            self.REGION, self.TEST_ACCESS_KEY_ID, self.TEST_ACCESS_KEY_DATA
        )
        self.gw.client = BotoClient()

    def test_run_task(self):
        # RUNNING lastStatus should be mapped to ContainerInstanceStatus.STARTED
        # with the private IP extracted from networkInterfaces.
        client_return_response = {
            "tasks": [
                {
                    "containers": [
                        {
                            "name": "container_1",
                            "exitcode": 123,
                            "lastStatus": "RUNNING",
                            "networkInterfaces": [
                                {
                                    "privateIpv4Address": self.TEST_IP_ADDRESS,
                                },
                            ],
                        }
                    ],
                    "taskArn": self.TEST_TASK_ARN,
                }
            ]
        }
        self.gw.client.run_task = MagicMock(return_value=client_return_response)
        task = self.gw.run_task(
            self.TEST_TASK_DEFINITION,
            self.TEST_CONTAINER,
            self.TEST_CMD,
            self.TEST_CLUSTER,
            self.TEST_SUBNET,
        )
        expected_task = ContainerInstance(
            self.TEST_TASK_ARN,
            self.TEST_IP_ADDRESS,
            ContainerInstanceStatus.STARTED,
        )
        self.assertEqual(task, expected_task)
        self.gw.client.run_task.assert_called()

    def test_describe_tasks(self):
        # Same mapping as run_task, but for a batch lookup of existing tasks.
        client_return_response = {
            "tasks": [
                {
                    "containers": [
                        {
                            "name": self.TEST_CONTAINER,
                            "exitcode": 123,
                            "lastStatus": "RUNNING",
                            "networkInterfaces": [
                                {
                                    "privateIpv4Address": self.TEST_IP_ADDRESS,
                                },
                            ],
                        }
                    ],
                    "taskArn": self.TEST_TASK_ARN,
                }
            ]
        }
        self.gw.client.describe_tasks = MagicMock(return_value=client_return_response)
        tasks = [
            self.TEST_TASK_DEFINITION,
        ]
        tasks = self.gw.describe_tasks(self.TEST_CLUSTER, tasks)
        expected_tasks = [
            ContainerInstance(
                self.TEST_TASK_ARN,
                self.TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
        ]
        self.assertEqual(tasks, expected_tasks)
        self.gw.client.describe_tasks.assert_called()

    def test_stop_task(self):
        # stop_task returns nothing; only verify the boto3 call happened.
        client_return_response = {
            "task": {
                "containers": [
                    {
                        "name": self.TEST_CONTAINER,
                        "exitcode": 1,
                        "lastStatus": "STOPPED",
                        "networkInterfaces": [
                            {
                                "privateIpv4Address": self.TEST_IP_ADDRESS,
                            },
                        ],
                    }
                ],
                "taskArn": self.TEST_TASK_ARN,
            }
        }
        self.gw.client.stop_task = MagicMock(return_value=client_return_response)
        self.gw.stop_task(self.TEST_CLUSTER, self.TEST_TASK_ARN)
        self.gw.client.stop_task.assert_called()

    def test_list_tasks(self):
        # list_tasks should surface the raw task ARNs.
        client_return_response = {"taskArns": [self.TEST_TASK_ARN]}
        self.gw.client.list_tasks = MagicMock(return_value=client_return_response)
        tasks = self.gw.list_tasks(self.TEST_CLUSTER)
        expected_tasks = [self.TEST_TASK_ARN]
        self.assertEqual(tasks, expected_tasks)
        self.gw.client.list_tasks.assert_called()

    def test_describe_clusers(self):
        # Cluster responses are mapped into Cluster entities with tags
        # flattened into a dict.
        client_return_response = {
            "clusters": [
                {
                    "clusterArn": self.TEST_CLUSTER,
                    "clusterName": "cluster_1",
                    "tags": [
                        {
                            "key": self.TEST_CLUSTER_TAG_KEY,
                            "value": self.TEST_CLUSTER_TAG_VALUE,
                        },
                    ],
                    "status": "ACTIVE",
                }
            ]
        }
        self.gw.client.describe_clusters = MagicMock(
            return_value=client_return_response
        )
        clusters = self.gw.describe_clusters(
            [
                self.TEST_CLUSTER,
            ]
        )
        tags = {self.TEST_CLUSTER_TAG_KEY: self.TEST_CLUSTER_TAG_VALUE}
        expected_clusters = [
            Cluster(self.TEST_CLUSTER, "cluster_1", ClusterStatus.ACTIVE, tags)
        ]
        self.assertEqual(expected_clusters, clusters)
        self.gw.client.describe_clusters.assert_called()
/tests/gateway/test_s3.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest.mock import MagicMock, patch

from fbpcs.gateway.s3 import S3Gateway


class TestS3Gateway(unittest.TestCase):
    TEST_LOCAL_FILE = "test-local-file"
    TEST_BUCKET = "test-bucket"
    TEST_FILE = "test-file"
    TEST_ACCESS_KEY_ID = "test-access-key-id"
    TEST_ACCESS_KEY_DATA = "test-access-key-data"
    REGION = "us-west-1"

    @patch("boto3.client")
    def test_create_bucket(self, BotoClient):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.create_bucket = MagicMock(return_value=None)
        gw.create_bucket(self.TEST_BUCKET)
        gw.client.create_bucket.assert_called()

    @patch("boto3.client")
    def test_delete_bucket(self, BotoClient):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.delete_bucket = MagicMock(return_value=None)
        gw.delete_bucket(self.TEST_BUCKET)
        gw.client.delete_bucket.assert_called()

    @patch("boto3.client")
    def test_put_object(self, BotoClient):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.put_object = MagicMock(return_value=None)
        gw.put_object(
            self.TEST_BUCKET, self.TEST_ACCESS_KEY_ID, self.TEST_ACCESS_KEY_DATA
        )
        gw.client.put_object.assert_called()

    # getsize is patched because the gateway reads the local file size
    # (e.g. for progress reporting) before uploading.
    @patch("os.path.getsize", return_value=100)
    @patch("boto3.client")
    def test_upload_file(self, BotoClient, mock_getsize):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.upload_file = MagicMock(return_value=None)
        gw.upload_file(self.TEST_LOCAL_FILE, self.TEST_BUCKET, self.TEST_FILE)
        gw.client.upload_file.assert_called()

    @patch("boto3.client")
    def test_download_file(self, BotoClient):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        # head_object supplies the object size consulted before downloading.
        gw.client.head_object.return_value = {"ContentLength": 100}
        gw.client.download_file = MagicMock(return_value=None)
        gw.download_file(self.TEST_BUCKET, self.TEST_FILE, self.TEST_LOCAL_FILE)
        gw.client.download_file.assert_called()

    @patch("boto3.client")
    def test_delete_object(self, BotoClient):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.delete_object = MagicMock(return_value=None)
        gw.delete_object(self.TEST_BUCKET, self.TEST_FILE)
        gw.client.delete_object.assert_called()

    @patch("boto3.client")
    def test_copy(self, BotoClient):
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.copy = MagicMock(return_value=None)
        gw.copy(
            self.TEST_BUCKET, self.TEST_FILE, self.TEST_BUCKET, f"{self.TEST_FILE}_COPY"
        )
        gw.client.copy.assert_called()

    @patch("boto3.client")
    def test_object_exists(self, BotoClient):
        # A successful head_object call means the object exists.
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.head_object = MagicMock(return_value=None)
        self.assertTrue(gw.object_exists(self.TEST_BUCKET, self.TEST_ACCESS_KEY_ID))
        gw.client.head_object.assert_called()

    @patch("boto3.client")
    def test_object_not_exists(self, BotoClient):
        # head_object raising is treated as "object does not exist".
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.head_object = MagicMock(side_effect=Exception)
        self.assertFalse(gw.object_exists(self.TEST_BUCKET, self.TEST_ACCESS_KEY_ID))
        gw.client.head_object.assert_called()

    @patch("boto3.client")
    def test_list_object2(self, BotoClient):
        test_page_content_key1 = "test-page-content-key1"
        test_page_content_key2 = "test-page-content-key2"
        client_return_response = [
            {
                "Contents": [
                    {"Key": test_page_content_key1},
                    {"Key": test_page_content_key2},
                ],
            }
        ]
        gw = S3Gateway(self.REGION)
        gw.client = BotoClient()
        gw.client.get_paginator("list_objects_v2").paginate = MagicMock(
            return_value=client_return_response
        )
        key_list = gw.list_object2(self.TEST_BUCKET, self.TEST_ACCESS_KEY_ID)
        expected_key_list = [
            test_page_content_key1,
            test_page_content_key2,
        ]
        self.assertEqual(key_list, expected_key_list)
        # Fix: the assertion previously looked up get_paginator("list_object_v2")
        # (missing "s"); it only worked because a MagicMock ignores call args.
        # Use the same paginator name the stub was installed under.
        gw.client.get_paginator("list_objects_v2").paginate.assert_called()
/tests/mapper/test_aws.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from fbpcs.entity.cluster_instance import ClusterStatus, Cluster
from fbpcs.entity.container_instance import ContainerInstanceStatus, ContainerInstance
from fbpcs.mapper.aws import (
    map_ecstask_to_containerinstance,
    map_esccluster_to_clusterinstance,
)


class TestAWSMapper(unittest.TestCase):
    TEST_IP_ADDRESS = "127.0.0.1"
    TEST_TASK_ARN = "test-task-arn"
    TEST_CLUSTER_ARN = "test-cluster-arn"
    TEST_CLUSTER_NAME = "test-cluster-name"

    def test_map_ecstask_to_containerinstance(self):
        # Four fixtures covering the status mapping:
        #   RUNNING            -> STARTED (keeps the private IP)
        #   STOPPED, exit 0    -> COMPLETED
        #   STOPPED, exit != 0 -> FAILED
        #   anything else      -> UNKNOWN
        ecs_task_response = {
            "tasks": [
                {
                    "containers": [
                        {
                            "exitCode": None,
                            "lastStatus": "RUNNING",
                            "networkInterfaces": [
                                {
                                    "privateIpv4Address": self.TEST_IP_ADDRESS,
                                },
                            ],
                        },
                    ],
                    "taskArn": self.TEST_TASK_ARN,
                },
                {
                    "containers": [
                        {
                            "exitCode": 0,
                            "lastStatus": "STOPPED",
                            "networkInterfaces": [],
                        },
                    ],
                    "taskArn": self.TEST_TASK_ARN,
                },
                {
                    "containers": [
                        {
                            "exitCode": 1,
                            "lastStatus": "STOPPED",
                            "networkInterfaces": [],
                        },
                    ],
                    "taskArn": self.TEST_TASK_ARN,
                },
                {
                    "containers": [
                        {
                            "exitCode": -1,
                            "lastStatus": "UNKNOWN",
                            "networkInterfaces": [],
                        },
                    ],
                    "taskArn": self.TEST_TASK_ARN,
                },
            ]
        }
        expected_task_list = [
            ContainerInstance(
                self.TEST_TASK_ARN,
                self.TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
            ContainerInstance(
                self.TEST_TASK_ARN,
                None,
                ContainerInstanceStatus.COMPLETED,
            ),
            ContainerInstance(
                self.TEST_TASK_ARN,
                None,
                ContainerInstanceStatus.FAILED,
            ),
            ContainerInstance(
                self.TEST_TASK_ARN,
                None,
                ContainerInstanceStatus.UNKNOWN,
            ),
        ]
        tasks_list = [
            map_ecstask_to_containerinstance(task) for task in ecs_task_response["tasks"]
        ]
        self.assertEqual(tasks_list, expected_task_list)

    def test_map_esccluster_to_clusterinstance(self):
        # Three fixtures covering ACTIVE / INACTIVE / UNKNOWN status mapping;
        # the tag list in each response must be flattened into a dict.
        tag_key_1 = "tag-key-1"
        tag_key_2 = "tag-key-2"
        tag_value_1 = "tag-value-1"
        tag_value_2 = "tag-value-2"
        ecs_cluster_response = {
            "clusters": [
                {
                    "clusterName": self.TEST_CLUSTER_NAME,
                    "clusterArn": self.TEST_CLUSTER_ARN,
                    "status": "ACTIVE",
                    "tags": [
                        {
                            "key": tag_key_1,
                            "value": tag_value_1,
                        },
                        {
                            "key": tag_key_2,
                            "value": tag_value_2,
                        },
                    ],
                },
                {
                    "clusterName": self.TEST_CLUSTER_NAME,
                    "clusterArn": self.TEST_CLUSTER_ARN,
                    "status": "INACTIVE",
                    "tags": [
                        {
                            "key": tag_key_1,
                            "value": tag_value_1,
                        },
                    ],
                },
                {
                    "clusterName": self.TEST_CLUSTER_NAME,
                    "clusterArn": self.TEST_CLUSTER_ARN,
                    "status": "UNKNOWN",
                    "tags": [
                        {
                            "key": tag_key_1,
                            "value": tag_value_1,
                        },
                    ],
                },
            ]
        }
        multi_tag_value_pair = {
            tag_key_1: tag_value_1,
            tag_key_2: tag_value_2,
        }
        single_tag_value_pair = {tag_key_1: tag_value_1}
        expected_cluster_list = [
            Cluster(
                self.TEST_CLUSTER_ARN,
                self.TEST_CLUSTER_NAME,
                ClusterStatus.ACTIVE,
                multi_tag_value_pair,
            ),
            Cluster(
                self.TEST_CLUSTER_ARN,
                self.TEST_CLUSTER_NAME,
                ClusterStatus.INACTIVE,
                single_tag_value_pair,
            ),
            Cluster(
                self.TEST_CLUSTER_ARN,
                self.TEST_CLUSTER_NAME,
                ClusterStatus.UNKNOWN,
                single_tag_value_pair,
            ),
        ]
        cluster_list = [
            map_esccluster_to_clusterinstance(cluster)
            for cluster in ecs_cluster_response["clusters"]
        ]
        self.assertEqual(cluster_list, expected_cluster_list)
/tests/repository/test_instance_s3.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import unittest
import uuid
from unittest.mock import MagicMock

from fbpcs.entity.mpc_instance import MPCInstance, MPCInstanceStatus, MPCRole
from fbpcs.repository.instance_s3 import S3InstanceRepository
from fbpcs.service.storage_s3 import S3StorageService


class TestS3InstanceRepository(unittest.TestCase):
    """CRUD tests for S3InstanceRepository with all S3 traffic mocked out.

    Each test stubs ``_exist`` (and the storage-service method under test)
    so no real AWS calls are made.
    """

    TEST_BASE_DIR = "./"
    TEST_INSTANCE_ID = str(uuid.uuid4())
    TEST_GAME_NAME = "lift"
    TEST_MPC_ROLE = MPCRole.SERVER
    TEST_NUM_WORKERS = 1
    TEST_SERVER_IPS = ["192.0.2.0", "192.0.2.1"]
    TEST_INPUT_ARGS = [{"input_filenames": "test_input_file"}]
    TEST_OUTPUT_ARGS = [{"output_filenames": "test_output_file"}]
    TEST_CONCURRENCY_ARGS = {"concurrency": 2}
    TEST_INPUT_DIRECTORY = "TEST_INPUT_DIRECTORY/"
    # Fixed typo: was TEST_OUTPUT_DIRECTROY (only referenced inside this class).
    TEST_OUTPUT_DIRECTORY = "TEST_OUTPUT_DIRECTORY/"
    ERROR_MSG_ALREADY_EXISTS = f"{TEST_INSTANCE_ID} already exists"
    ERROR_MSG_NOT_EXISTS = f"{TEST_INSTANCE_ID} does not exist"

    def setUp(self):
        storage_svc = S3StorageService("us-west-1")
        self.s3_storage_repo = S3InstanceRepository(storage_svc, self.TEST_BASE_DIR)
        self.mpc_instance = MPCInstance(
            instance_id=self.TEST_INSTANCE_ID,
            game_name=self.TEST_GAME_NAME,
            mpc_role=self.TEST_MPC_ROLE,
            num_workers=self.TEST_NUM_WORKERS,
            server_ips=self.TEST_SERVER_IPS,
            status=MPCInstanceStatus.CREATED,
            input_args=self.TEST_INPUT_ARGS,
            output_args=self.TEST_OUTPUT_ARGS,
            concurrency_args=self.TEST_CONCURRENCY_ARGS,
            input_directory=self.TEST_INPUT_DIRECTORY,
            output_directory=self.TEST_OUTPUT_DIRECTORY,
        )

    def test_create_non_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(return_value=False)
        self.s3_storage_repo.s3_storage_svc.write = MagicMock(return_value=None)
        self.s3_storage_repo.create(self.mpc_instance)
        self.s3_storage_repo.s3_storage_svc.write.assert_called()

    def test_create_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(
            side_effect=RuntimeError(self.ERROR_MSG_ALREADY_EXISTS)
        )
        with self.assertRaisesRegex(RuntimeError, self.ERROR_MSG_ALREADY_EXISTS):
            self.s3_storage_repo.create(self.mpc_instance)

    def test_read_non_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(
            side_effect=RuntimeError(self.ERROR_MSG_NOT_EXISTS)
        )
        with self.assertRaisesRegex(RuntimeError, self.ERROR_MSG_NOT_EXISTS):
            self.s3_storage_repo.read(self.TEST_INSTANCE_ID)

    def test_read_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(return_value=True)
        self.s3_storage_repo.s3_storage_svc.read = MagicMock(
            return_value=pickle.dumps(self.mpc_instance, 0).decode()
        )
        # Bug fix: read() takes an instance_id string; previously the whole
        # MPCInstance object was passed here (every other test passes the id).
        instance = self.s3_storage_repo.read(self.TEST_INSTANCE_ID)
        self.assertEqual(self.mpc_instance, instance)

    def test_update_non_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(
            side_effect=RuntimeError(self.ERROR_MSG_NOT_EXISTS)
        )
        with self.assertRaisesRegex(RuntimeError, self.ERROR_MSG_NOT_EXISTS):
            self.s3_storage_repo.update(self.mpc_instance)

    def test_update_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(return_value=True)
        self.s3_storage_repo.s3_storage_svc.write = MagicMock(return_value=None)
        self.s3_storage_repo.update(self.mpc_instance)
        self.s3_storage_repo.s3_storage_svc.write.assert_called()

    def test_delete_non_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(
            side_effect=RuntimeError(self.ERROR_MSG_NOT_EXISTS)
        )
        with self.assertRaisesRegex(RuntimeError, self.ERROR_MSG_NOT_EXISTS):
            self.s3_storage_repo.delete(self.TEST_INSTANCE_ID)

    def test_delete_existing_instance(self):
        self.s3_storage_repo._exist = MagicMock(return_value=True)
        self.s3_storage_repo.s3_storage_svc.delete = MagicMock(return_value=None)
        self.s3_storage_repo.delete(self.TEST_INSTANCE_ID)
        self.s3_storage_repo.s3_storage_svc.delete.assert_called()

    def test_exists(self):
        self.s3_storage_repo.s3_storage_svc.file_exists = MagicMock(return_value=True)
        self.assertTrue(self.s3_storage_repo._exist(self.TEST_INSTANCE_ID))
/tests/service/test_container_aws.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import unittest
from unittest.mock import MagicMock, patch

from fbpcs.service.container_aws import (
    ContainerInstance,
    ContainerInstanceStatus,
    AWSContainerService,
)

TEST_INSTANCE_ID_1 = "test-instance-id-1"
TEST_INSTANCE_ID_2 = "test-instance-id-2"
TEST_REGION = "us-west-2"
TEST_KEY_ID = "test-key-id"
TEST_KEY_DATA = "test-key-data"
TEST_CLUSTER = "test-cluster"
TEST_SUBNET = "test-subnet"
TEST_IP_ADDRESS = "127.0.0.1"
TEST_CONTAINER_DEFNITION = "test-task-definition#test-container-definition"


class TestAWSContainerService(unittest.TestCase):
    """Tests for AWSContainerService with the ECS gateway mocked."""

    @patch("fbpcs.gateway.ecs.ECSGateway")
    def setUp(self, MockECSGateway):
        self.container_svc = AWSContainerService(
            TEST_REGION, TEST_CLUSTER, TEST_SUBNET, TEST_KEY_ID, TEST_KEY_DATA
        )
        self.container_svc.ecs_gateway = MockECSGateway()

    def test_create_instances(self):
        created_instances = [
            ContainerInstance(
                TEST_INSTANCE_ID_1,
                TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
            ContainerInstance(
                TEST_INSTANCE_ID_2,
                TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
        ]
        self.container_svc.ecs_gateway.run_task = MagicMock(
            side_effect=created_instances
        )
        cmd_list = ["test_cmd", "test_cmd-1"]
        container_instances = self.container_svc.create_instances(
            TEST_CONTAINER_DEFNITION, cmd_list
        )
        self.assertEqual(container_instances, created_instances)
        self.assertEqual(
            self.container_svc.ecs_gateway.run_task.call_count, len(created_instances)
        )

    def test_create_instances_async(self):
        # Bug fix: this was an ``async def`` test inside a plain
        # unittest.TestCase, so the coroutine was never awaited and the test
        # passed vacuously. Drive the coroutine explicitly with asyncio.run.
        created_instances = [
            ContainerInstance(
                TEST_INSTANCE_ID_1,
                TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
            ContainerInstance(
                TEST_INSTANCE_ID_2,
                TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
        ]
        self.container_svc.ecs_gateway.run_task = MagicMock(
            side_effect=created_instances
        )
        cmd_list = ["test_cmd", "test_cmd-1"]
        container_instances = asyncio.run(
            self.container_svc.create_instances_async(
                TEST_CONTAINER_DEFNITION, cmd_list
            )
        )
        self.assertEqual(container_instances, created_instances)
        self.assertEqual(
            self.container_svc.ecs_gateway.run_task.call_count, len(created_instances)
        )

    def test_create_instance(self):
        created_instance = ContainerInstance(
            TEST_INSTANCE_ID_1,
            TEST_IP_ADDRESS,
            ContainerInstanceStatus.STARTED,
        )
        self.container_svc.ecs_gateway.run_task = MagicMock(
            return_value=created_instance
        )
        container_instance = self.container_svc.create_instance(
            TEST_CONTAINER_DEFNITION, "test-cmd"
        )
        self.assertEqual(container_instance, created_instance)

    def test_get_instance(self):
        container_instance = ContainerInstance(
            TEST_INSTANCE_ID_1,
            TEST_IP_ADDRESS,
            ContainerInstanceStatus.STARTED,
        )
        self.container_svc.ecs_gateway.describe_task = MagicMock(
            return_value=container_instance
        )
        instance = self.container_svc.get_instance(TEST_INSTANCE_ID_1)
        self.assertEqual(instance, container_instance)

    def test_get_instances(self):
        container_instances = [
            ContainerInstance(
                TEST_INSTANCE_ID_1,
                TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
            ContainerInstance(
                TEST_INSTANCE_ID_2,
                TEST_IP_ADDRESS,
                ContainerInstanceStatus.STARTED,
            ),
        ]
        self.container_svc.ecs_gateway.describe_tasks = MagicMock(
            return_value=container_instances
        )
        instances = self.container_svc.get_instances(
            [TEST_INSTANCE_ID_1, TEST_INSTANCE_ID_2]
        )
        self.assertEqual(instances, container_instances)

    def test_list_tasks(self):
        instance_ids = [TEST_INSTANCE_ID_1, TEST_INSTANCE_ID_2]
        self.container_svc.ecs_gateway.list_tasks = MagicMock(return_value=instance_ids)
        self.assertEqual(instance_ids, self.container_svc.list_tasks())
/tests/service/test_log_cloudwatch.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock, patch

from fbpcs.service.log_cloudwatch import CloudWatchLogService

REGION = "us-west-1"
LOG_GROUP = "test-group-name"
LOG_PATH = "test-log-path"


class TestCloudWatchLogService(unittest.TestCase):
    """Tests for CloudWatchLogService with the CloudWatch gateway mocked."""

    @patch("fbpcs.gateway.cloudwatch.CloudWatchGateway")
    def test_fetch(self, MockCloudWatchGateway):
        log_service = CloudWatchLogService(LOG_GROUP, REGION)
        mocked_log = {"test-events": [{"test-event-name": "test-event-data"}]}
        log_service.cloudwatch_gateway = MockCloudWatchGateway()
        log_service.cloudwatch_gateway.fetch = MagicMock(return_value=mocked_log)
        # Bug fix: previously this invoked the mocked gateway directly, which
        # only verified the mock itself. Go through the service under test.
        # NOTE(review): assumes fetch() delegates to the gateway and returns
        # its result unchanged — confirm against CloudWatchLogService.
        returned_log = log_service.fetch(LOG_PATH)
        log_service.cloudwatch_gateway.fetch.assert_called()
        self.assertEqual(mocked_log, returned_log)
/tests/service/test_mpc.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import unittest
from unittest.mock import AsyncMock, MagicMock, patch

from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.entity.mpc_instance import MPCInstance, MPCInstanceStatus, MPCRole
from fbpcs.service.mpc import MPCService

TEST_INSTANCE_ID = "123"
TEST_GAME_NAME = "lift"
TEST_MPC_ROLE = MPCRole.SERVER
TEST_NUM_WORKERS = 1
TEST_SERVER_IPS = ["192.0.2.0", "192.0.2.1"]
TEST_INPUT_ARGS = "test_input_file"
TEST_OUTPUT_ARGS = "test_output_file"
TEST_CONCURRENCY_ARGS = 1
TEST_INPUT_DIRECTORY = "TEST_INPUT_DIRECTORY/"
TEST_OUTPUT_DIRECTORY = "TEST_OUTPUT_DIRECTORY/"
TEST_TASK_DEFINITION = "test_task_definition"
INPUT_DIRECTORY = "input_directory"
OUTPUT_DIRECTORY = "output_directory"
GAME_ARGS = [
    {
        "input_filenames": TEST_INPUT_ARGS,
        "input_directory": TEST_INPUT_DIRECTORY,
        "output_filenames": TEST_OUTPUT_ARGS,
        "output_directory": TEST_OUTPUT_DIRECTORY,
        "concurrency": TEST_CONCURRENCY_ARGS,
    }
]


class TestMPCService(unittest.TestCase):
    """Tests for MPCService with container/storage/repository/game services mocked."""

    def setUp(self):
        cspatcher = patch("fbpcs.service.container_aws.AWSContainerService")
        sspatcher = patch("fbpcs.service.storage_s3.S3StorageService")
        irpatcher = patch(
            "fbpcs.repository.mpc_instance_local.LocalMPCInstanceRepository"
        )
        gspatcher = patch("fbpcs.service.mpc_game.MPCGameService")
        container_svc = cspatcher.start()
        storage_svc = sspatcher.start()
        instance_repository = irpatcher.start()
        mpc_game_svc = gspatcher.start()
        for patcher in (cspatcher, sspatcher, irpatcher, gspatcher):
            self.addCleanup(patcher.stop)
        self.mpc_service = MPCService(
            container_svc,
            storage_svc,
            instance_repository,
            "test_task_definition",
            mpc_game_svc,
        )

    # NOTE(review): the two sample builders below are currently identical
    # except for keyword ordering — kept separate to preserve the public
    # test surface; consider consolidating.
    @staticmethod
    def _get_sample_mpcinstance():
        return MPCInstance(
            instance_id=TEST_INSTANCE_ID,
            game_name=TEST_GAME_NAME,
            mpc_role=TEST_MPC_ROLE,
            num_workers=TEST_NUM_WORKERS,
            server_ips=TEST_SERVER_IPS,
            status=MPCInstanceStatus.CREATED,
            game_args=GAME_ARGS,
        )

    @staticmethod
    def _get_sample_mpcinstance_with_game_args():
        return MPCInstance(
            instance_id=TEST_INSTANCE_ID,
            game_name=TEST_GAME_NAME,
            mpc_role=TEST_MPC_ROLE,
            num_workers=TEST_NUM_WORKERS,
            status=MPCInstanceStatus.CREATED,
            server_ips=TEST_SERVER_IPS,
            game_args=GAME_ARGS,
        )

    @staticmethod
    def _get_sample_mpcinstance_client():
        return MPCInstance(
            instance_id=TEST_INSTANCE_ID,
            game_name=TEST_GAME_NAME,
            mpc_role=MPCRole.CLIENT,
            num_workers=TEST_NUM_WORKERS,
            server_ips=TEST_SERVER_IPS,
            status=MPCInstanceStatus.CREATED,
            game_args=GAME_ARGS,
        )

    def test_spin_up_containers_one_docker_inconsistent_arguments(self):
        # Bug fix: this was an ``async def`` test in a plain unittest.TestCase,
        # so the coroutine was never awaited and the test passed vacuously.
        # Drive each coroutine explicitly with asyncio.run.
        with self.assertRaisesRegex(
            ValueError,
            "The number of containers is not consistent with the number of game argument dictionary.",
        ):
            asyncio.run(
                self.mpc_service._spin_up_containers_onedocker(
                    game_name=TEST_GAME_NAME,
                    mpc_role=MPCRole.SERVER,
                    num_containers=TEST_NUM_WORKERS,
                    game_args=[],
                )
            )
        with self.assertRaisesRegex(
            ValueError,
            "The number of containers is not consistent with number of ip addresses.",
        ):
            asyncio.run(
                self.mpc_service._spin_up_containers_onedocker(
                    game_name=TEST_GAME_NAME,
                    mpc_role=MPCRole.CLIENT,
                    num_containers=TEST_NUM_WORKERS,
                    ip_addresses=TEST_SERVER_IPS,
                )
            )

    def test_create_instance_with_game_args(self):
        self.mpc_service.create_instance(
            instance_id=TEST_INSTANCE_ID,
            game_name=TEST_GAME_NAME,
            mpc_role=TEST_MPC_ROLE,
            num_workers=TEST_NUM_WORKERS,
            server_ips=TEST_SERVER_IPS,
            game_args=GAME_ARGS,
        )
        self.mpc_service.instance_repository.create.assert_called()
        self.assertEqual(
            self._get_sample_mpcinstance_with_game_args(),
            self.mpc_service.instance_repository.create.call_args[0][0],
        )

    def test_create_instance(self):
        self.mpc_service.create_instance(
            instance_id=TEST_INSTANCE_ID,
            game_name=TEST_GAME_NAME,
            mpc_role=TEST_MPC_ROLE,
            num_workers=TEST_NUM_WORKERS,
            server_ips=TEST_SERVER_IPS,
            game_args=GAME_ARGS,
        )
        # check that instance with correct instance_id was created
        self.mpc_service.instance_repository.create.assert_called()
        # Fixed deprecated alias: assertEquals -> assertEqual.
        self.assertEqual(
            self._get_sample_mpcinstance(),
            self.mpc_service.instance_repository.create.call_args[0][0],
        )

    def _read_side_effect_start(self, instance_id: str):
        """mock MPCInstanceRepository.read for test_start"""
        if instance_id == TEST_INSTANCE_ID:
            return self._get_sample_mpcinstance()
        else:
            raise RuntimeError(f"{instance_id} does not exist")

    def test_start_instance(self):
        self.mpc_service.instance_repository.read = MagicMock(
            side_effect=self._read_side_effect_start
        )
        created_instances = [
            ContainerInstance(
                "arn:aws:ecs:us-west-1:592513842793:task/57850450-7a81-43cc-8c73-2071c52e4a68",  # noqa
                "10.0.1.130",
                ContainerInstanceStatus.STARTED,
            )
        ]
        self.mpc_service.container_svc.create_instances_async = AsyncMock(
            return_value=created_instances
        )
        built_one_docker_args = ("private_lift/lift", "test one docker arguments")
        self.mpc_service.mpc_game_svc.build_one_docker_args = MagicMock(
            return_value=built_one_docker_args
        )
        # check that update is called with correct status
        self.mpc_service.start_instance(TEST_INSTANCE_ID)
        self.mpc_service.instance_repository.update.assert_called()
        latest_update = self.mpc_service.instance_repository.update.call_args_list[-1]
        updated_status = latest_update[0][0].status
        self.assertEqual(updated_status, MPCInstanceStatus.STARTED)

    def test_start_instance_missing_ips(self):
        self.mpc_service.instance_repository.read = MagicMock(
            return_value=self._get_sample_mpcinstance_client()
        )
        # Exception because role is client but server ips are not given
        with self.assertRaises(ValueError):
            self.mpc_service.start_instance(TEST_INSTANCE_ID)

    def _read_side_effect_update(self, instance_id):
        """
        mock MPCInstanceRepository.read for test_update,
        with instance.containers is not None
        """
        if instance_id == TEST_INSTANCE_ID:
            mpc_instance = self._get_sample_mpcinstance()
        else:
            raise RuntimeError(f"{instance_id} does not exist")
        mpc_instance.status = MPCInstanceStatus.STARTED
        mpc_instance.containers = [
            ContainerInstance(
                "arn:aws:ecs:us-west-1:592513842793:task/57850450-7a81-43cc-8c73-2071c52e4a68",  # noqa
                "10.0.1.130",
                ContainerInstanceStatus.STARTED,
            )
        ]
        return mpc_instance

    def test_update_instance(self):
        self.mpc_service.instance_repository.read = MagicMock(
            side_effect=self._read_side_effect_update
        )
        container_instances = [
            ContainerInstance(
                "arn:aws:ecs:us-west-1:592513842793:task/cd34aed2-321f-49d1-8641-c54baff8b77b",  # noqa
                "10.0.1.130",
                ContainerInstanceStatus.STARTED,
            )
        ]
        self.mpc_service.container_svc.get_instances = MagicMock(
            return_value=container_instances
        )
        self.mpc_service.update_instance(TEST_INSTANCE_ID)
        self.mpc_service.instance_repository.update.assert_called()
/tests/service/test_onedocker.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import AsyncMock, patch

from fbpcs.entity.container_instance import ContainerInstance, ContainerInstanceStatus
from fbpcs.service.onedocker import OneDockerService


class TestOneDockerService(unittest.TestCase):
    """Tests for OneDockerService with its container service mocked."""

    @patch("fbpcs.service.container.ContainerService")
    def setUp(self, MockContainerService):
        self.onedocker_svc = OneDockerService(MockContainerService())

    def test_start_container(self):
        expected = ContainerInstance(
            "arn:aws:ecs:region:account_id:task/container_id",
            "192.0.2.0",
            ContainerInstanceStatus.STARTED,
        )
        self.onedocker_svc.container_svc.create_instances_async = AsyncMock(
            return_value=[expected]
        )
        result = self.onedocker_svc.start_container(
            "task_def", "project/exe_name", "cmd_args"
        )
        self.assertEqual(result, expected)

    def test_start_containers(self):
        expected = [
            ContainerInstance(
                "arn:aws:ecs:region:account_id:task/container_id_1",
                "192.0.2.0",
                ContainerInstanceStatus.STARTED,
            ),
            ContainerInstance(
                "arn:aws:ecs:region:account_id:task/container_id_2",
                "192.0.2.1",
                ContainerInstanceStatus.STARTED,
            ),
        ]
        self.onedocker_svc.container_svc.create_instances_async = AsyncMock(
            return_value=expected
        )
        result = self.onedocker_svc.start_containers(
            "task_def", "project/exe_name", ["--k1=v1", "--k2=v2"]
        )
        self.assertEqual(result, expected)

    def test_get_cmd(self):
        package_name = "project/exe_name"
        cmd_args = "--k1=v1 --k2=v2"
        timeout = 3600
        expected_cmd_without_timeout = "python3.8 -m one_docker_runner --package_name=project/exe_name --cmd='/root/one_docker/package/exe_name --k1=v1 --k2=v2'"
        expected_cmd_with_timeout = expected_cmd_without_timeout + " --timeout=3600"
        # _get_cmd with and without the optional timeout argument.
        self.assertEqual(
            expected_cmd_without_timeout,
            self.onedocker_svc._get_cmd(package_name, cmd_args),
        )
        self.assertEqual(
            expected_cmd_with_timeout,
            self.onedocker_svc._get_cmd(package_name, cmd_args, timeout),
        )
/tests/service/test_storage.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest

from fbpcs.service.storage import PathType, StorageService


class TestStorageService(unittest.TestCase):
    """Tests for StorageService.path_type URL classification."""

    def test_path_type_s3(self):
        # An S3 virtual-hosted-style URL should be classified as S3.
        detected = StorageService.path_type(
            "https://bucket-name.s3.Region.amazonaws.com/key-name"
        )
        self.assertEqual(detected, PathType.S3)

    def test_path_type_local(self):
        # A plain filesystem path should be classified as local.
        detected = StorageService.path_type("/usr/file")
        self.assertEqual(detected, PathType.Local)
/tests/service/test_storage_s3.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from unittest.mock import call, MagicMock, patch

from fbpcs.service.storage_s3 import S3StorageService


class TestS3StorageService(unittest.TestCase):
    """Tests for S3StorageService copy/delete/exists with the S3 gateway mocked."""

    LOCAL_FILE = "/usr/test_file"
    LOCAL_FOLDER = "/foo"
    S3_FILE = "https://bucket.s3.Region.amazonaws.com/test_file"
    S3_FILE_COPY = "https://bucket.s3.Region.amazonaws.com/test_file_copy"
    S3_FOLDER = "https://bucket.s3.Region.amazonaws.com/test_folder/"
    S3_FOLDER_COPY = "https://bucket.s3.Region.amazonaws.com/test_folder_copy/"
    S3_FILE_WITH_SUBFOLDER = (
        "https://bucket.s3.Region.amazonaws.com/test_folder/test_file"
    )
    """
    The layout of LOCAL_DIR:
    /foo/
    ├── bar/
    └── baz/
        ├── a
        └── b
    """
    LOCAL_DIR = [
        ("/foo", ("bar",), ("baz",)),
        ("/foo/baz", (), ("a", "b")),
    ]
    S3_DIR = [
        "test_folder/bar/",
        "test_folder/baz/",
        "test_folder/baz/a",
        "test_folder/baz/b",
    ]

    @staticmethod
    def _make_service(gateway_cls=None):
        """Build an S3StorageService; swap in a mock gateway when given."""
        service = S3StorageService("us-west-1")
        if gateway_cls is not None:
            service.s3_gateway = gateway_cls()
        return service

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_local_to_s3(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.upload_file = MagicMock(return_value=None)
        service.copy(self.LOCAL_FILE, self.S3_FILE)
        service.s3_gateway.upload_file.assert_called_with(
            str(self.LOCAL_FILE), "bucket", "test_file"
        )

    def test_copy_local_dir_to_s3_recursive_false(self):
        service = self._make_service()
        with patch("os.path.isdir", return_value=True):
            self.assertRaises(
                ValueError, service.copy, self.LOCAL_FOLDER, self.S3_FOLDER, False
            )

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_local_dir_to_s3_recursive_true(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.put_object = MagicMock(return_value=None)
        service.s3_gateway.upload_file = MagicMock(return_value=None)
        with patch("os.path.isdir", return_value=True), patch(
            "os.walk", return_value=self.LOCAL_DIR
        ):
            service.copy(self.LOCAL_FOLDER, self.S3_FOLDER, True)
            # Empty subfolder becomes an empty S3 object; files are uploaded.
            service.s3_gateway.put_object.assert_called_with(
                "bucket", "test_folder/bar/", ""
            )
            service.s3_gateway.upload_file.assert_has_calls(
                [
                    call("/foo/baz/a", "bucket", "test_folder/baz/a"),
                    call("/foo/baz/b", "bucket", "test_folder/baz/b"),
                ],
                any_order=True,
            )

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_s3_to_local(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.download_file = MagicMock(return_value=None)
        service.copy(self.S3_FILE, self.LOCAL_FILE)
        service.s3_gateway.download_file.assert_called_with(
            "bucket", "test_file", str(self.LOCAL_FILE)
        )

    def test_copy_s3_dir_to_local_recursive_false(self):
        service = self._make_service()
        self.assertRaises(
            ValueError, service.copy, self.S3_FOLDER, self.LOCAL_FOLDER, False
        )

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_s3_dir_to_local_source_does_not_exist(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.object_exists = MagicMock(return_value=False)
        self.assertRaises(
            ValueError, service.copy, self.S3_FOLDER, self.LOCAL_FOLDER, False
        )

    @patch("os.makedirs")
    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_s3_dir_to_local_ok(self, MockS3Gateway, os_makedirs):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.object_exists = MagicMock(return_value=True)
        service.s3_gateway.list_object2 = MagicMock(return_value=self.S3_DIR)
        service.s3_gateway.download_file = MagicMock(return_value=None)
        service.copy(self.S3_FOLDER, self.LOCAL_FOLDER, True)
        os_makedirs.assert_has_calls(
            [
                call("/foo/bar"),
                call("/foo/baz"),
            ],
            any_order=True,
        )
        service.s3_gateway.download_file.assert_has_calls(
            [
                call("bucket", "test_folder/baz/a", "/foo/baz/a"),
                call("bucket", "test_folder/baz/b", "/foo/baz/b"),
            ],
            any_order=True,
        )

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_local_to_local(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        self.assertRaises(ValueError, service.copy, self.LOCAL_FILE, self.LOCAL_FILE)

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_s3_to_s3(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.copy(self.S3_FILE, self.S3_FILE_COPY)
        service.s3_gateway.copy.assert_called_with(
            "bucket", "test_file", "bucket", "test_file_copy"
        )

    def test_copy_s3_dir_to_s3_recursive_false(self):
        service = self._make_service()
        self.assertRaises(
            ValueError, service.copy, self.S3_FOLDER, self.S3_FOLDER_COPY, False
        )

    def test_copy_s3_dir_to_s3_source_and_dest_are_the_same(self):
        service = self._make_service()
        self.assertRaises(
            ValueError, service.copy, self.S3_FOLDER, self.S3_FOLDER, True
        )

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_s3_dir_to_s3_source_does_not_exist(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.object_exists = MagicMock(return_value=False)
        self.assertRaises(
            ValueError, service.copy, self.S3_FOLDER, self.S3_FOLDER_COPY, False
        )

    @patch("os.makedirs")
    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_copy_s3_dir_to_s3_ok(self, MockS3Gateway, os_makedirs):
        service = self._make_service(MockS3Gateway)
        service.s3_gateway.object_exists = MagicMock(return_value=True)
        service.s3_gateway.list_object2 = MagicMock(return_value=self.S3_DIR)
        service.s3_gateway.put_object = MagicMock(return_value=None)
        service.s3_gateway.copy = MagicMock(return_value=None)
        service.copy(self.S3_FOLDER, self.S3_FOLDER_COPY, True)
        service.s3_gateway.put_object.assert_has_calls(
            [
                call("bucket", "test_folder_copy/bar/", ""),
                call("bucket", "test_folder_copy/baz/", ""),
            ],
            any_order=True,
        )
        service.s3_gateway.copy.assert_has_calls(
            [
                call("bucket", "test_folder/baz/a", "bucket", "test_folder_copy/baz/a"),
                call("bucket", "test_folder/baz/b", "bucket", "test_folder_copy/baz/b"),
            ],
            any_order=True,
        )

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_delete_s3(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.delete(self.S3_FILE)
        service.s3_gateway.delete_object.assert_called_with("bucket", "test_file")

    @patch("fbpcs.gateway.s3.S3Gateway")
    def test_file_exists(self, MockS3Gateway):
        service = self._make_service(MockS3Gateway)
        service.file_exists(self.S3_FILE)
        service.s3_gateway.object_exists.assert_called_with("bucket", "test_file")
/tests/util/test_reflect.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest

from fbpcs.util.reflect import get_class
from fbpcs.util.s3path import S3Path

TEST_CLASS_PATH = "fbpcs.util.s3path.S3Path"


class TestReflect(unittest.TestCase):
    """Tests for the reflection helper get_class."""

    def test_get_class(self):
        # Resolving a dotted path must yield the class object itself.
        resolved = get_class(TEST_CLASS_PATH)
        self.assertEqual(S3Path, resolved)
/tests/util/test_s3path.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest

from fbpcs.util.s3path import S3Path


class TestS3Path(unittest.TestCase):
    """Tests for S3Path URL parsing (region/bucket/key extraction)."""

    def test_s3path_no_subfolder(self):
        test_s3path = S3Path("https://bucket-name.s3.Region.amazonaws.com/key-name")
        self.assertEqual(test_s3path.region, "Region")
        self.assertEqual(test_s3path.bucket, "bucket-name")
        self.assertEqual(test_s3path.key, "key-name")

    # Fixed typo in method name: was test_s3path_with_subfoler.
    def test_s3path_with_subfolder(self):
        test_s3path = S3Path(
            "https://bucket-name.s3.Region.amazonaws.com/subfolder/key"
        )
        self.assertEqual(test_s3path.region, "Region")
        self.assertEqual(test_s3path.bucket, "bucket-name")
        self.assertEqual(test_s3path.key, "subfolder/key")

    def test_s3path_invalid_fileURL(self):
        test_url = "an invalid fileURL"
        with self.assertRaises(ValueError):
            S3Path(test_url)
/tests/util/test_typing.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest

from fbpcs.util.typing import checked_cast

TEST_STR = "test"
TEST_NUM = 123


class TestTyping(unittest.TestCase):
    """Tests for the checked_cast runtime type-assertion helper."""

    def test_checked_cast(self):
        # NOTE(review): this interpolates the *builtin* ``type``, producing
        # "<class 'type'>" in the expected message — presumably matching
        # checked_cast's own error text; confirm against its implementation.
        error = f"Value was not of type {type!r}:\n{TEST_STR!r}"
        with self.assertRaisesRegex(ValueError, error):
            checked_cast(int, TEST_STR)
        # A value of the right type passes through unchanged.
        self.assertEqual(checked_cast(int, TEST_NUM), TEST_NUM)
/tests/util/test_yaml.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import unittest
from unittest.mock import patch, mock_open

from fbpcs.util.yaml import load, dump

TEST_FILENAME = "TEST_FILE"
TEST_DICT = {
    "test_dict": [
        {"test_key_1": "test_value_1"},
        {"test_key_1": "test_value_2"},
    ]
}


class TestYaml(unittest.TestCase):
    """Tests for the yaml load/dump helpers with file I/O mocked."""

    # JSON is valid YAML, so it doubles as the mocked file content.
    data = json.dumps(TEST_DICT)

    @patch("builtins.open", new_callable=mock_open, read_data=data)
    def test_load(self, mock_file):
        # Sanity-check the mock, then verify load() parses it back.
        self.assertEqual(open(TEST_FILENAME).read(), self.data)
        self.assertEqual(load(TEST_FILENAME), TEST_DICT)
        mock_file.assert_called_with(TEST_FILENAME)

    @patch("builtins.open")
    @patch("yaml.dump")
    def test_dump(self, mock_dump, mock_open):
        mock_dump.return_value = None
        stream = mock_open().__enter__.return_value
        self.assertIsNone(dump(TEST_DICT, TEST_FILENAME))
        mock_open.assert_called_with(TEST_FILENAME, "w")
        mock_dump.assert_called_with(TEST_DICT, stream)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
estebanfloresf/testcases
refs/heads/master
{"/testcases/spiders/createTestCase.py": ["/testcases/items.py"], "/testcases/spiders/testSpider.py": ["/testcases/items.py"]}
└── ├── testcases │ ├── items.py │ ├── main.py │ ├── spiders │ │ ├── createTestCase.py │ │ └── testSpider.py │ └── variables.py └── utils ├── generateTC.py ├── readFiles.py └── readTestCases.py
/testcases/items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class TestCasesItem(scrapy.Item):
    """One scraped component with its requirements and responsive notes."""

    component = scrapy.Field()
    requirements = scrapy.Field()
    responsive = scrapy.Field()


class Requirements(scrapy.Item):
    """A single requirement description."""

    description = scrapy.Field()


class Responsive(scrapy.Item):
    """Responsive requirements for a specific device."""

    device = scrapy.Field()
    requirements = scrapy.Field()
/testcases/main.py
"""Entry point: launches the ``createTestCase`` spider via scrapy's CLI.

Alternate workflows (label generation, reading test cases back from excel)
are left commented out below for manual use.
"""
from scrapy import cmdline
import os
import inspect
import logging

# Absolute path of the parent of this script's directory (the project root).
path = os.path.abspath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)
)

# To generate the verified labels from the input excel (uncomment line below)
# os.system('python '+path+'\\utils\\generateTC.py')

# To Make a scrape of the confluence page (uncomment line below)
# var = input("Please enter something: ")
# print("You entered " + str(var))

cmdline.execute("scrapy crawl createTestCase".split())

# To read excel file
# os.system('python '+path+'\\utils\\readTestCases.py')
/testcases/spiders/createTestCase.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.utils.project import get_project_settings
from ..items import TestCasesItem
from scrapy.loader import ItemLoader


class createTestCaseSpider(scrapy.Spider):
    """Scrapes a Confluence documentation table into TestCasesItem objects.

    Credentials come from the project settings (HTTP basic auth).
    """

    name = "createTestCase"
    settings = get_project_settings()
    http_user = settings.get('HTTP_USER')
    http_pass = settings.get('HTTP_PASS')
    allowed_domains = ["confluence.verndale.com"]
    start_urls = ['https://confluence.verndale.com/display/GEHC/My+Profile+Page+-+DOC']

    def parse(self, response):
        """Walk each table row, emitting one item with component + requirements."""
        item = TestCasesItem()
        title = response.xpath('//*[@id="title-text"]/a/text()').extract_first()
        print('Documentation: '+title)
        table_xpath = '//*[@id="main-content"]/div/div[4]/div/div/div[1]/table/tbody/tr'
        table = response.xpath(table_xpath)
        for index, row in enumerate(table):
            if index == 0:
                # First row is the table header — skip it.
                continue
            # NOTE(review): when a row lists several components only the last
            # one survives in item['component'] — confirm intended behavior.
            components = row.xpath('.//td[2]/text() | .//td[2]/p/text()').extract()
            for compName in components:
                item['component'] = str(compName)
                print('Verify ' + compName + ' Component')
            # This path is usually the one to be used
            component_xpath = ".//td[3][contains(@class,'confluenceTd')]"
            description = ""
            # Fixed deprecated Selector.select (removed in modern Scrapy) ->
            # .xpath, and flattened the nested else/if ladder into elif.
            if row.xpath(component_xpath + "/a/text()").extract():
                requirements = row.xpath(component_xpath + "/a//text()").extract()
                description = "|".join(requirements)
            elif row.xpath(component_xpath + "/ul//*/text()").extract():
                requirements = row.xpath(component_xpath + "/ul//li//text()").extract()
                print(requirements)
                description = "|".join(requirements)
            elif row.xpath(component_xpath + "/div" + "/ul//*/text()").extract():
                requirements = row.xpath(
                    component_xpath + "/div" + "/ul//li//text()"
                ).extract()
                description = "|".join(requirements)
            item['requirements'] = str(description)
            yield item
/testcases/spiders/testSpider.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from scrapy.utils.project import get_project_settings
from ..items import TestCasesItem, Responsive, Requirements
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError


class TestspiderSpider(scrapy.Spider):
    """Scrapes component rows plus their responsive-requirement macros.

    Rows are re-requested (dont_filter) so each callback can work on the same
    page with per-row state carried in ``meta``.
    """

    name = "testspider"
    settings = get_project_settings()
    http_user = settings.get('HTTP_USER')
    http_pass = settings.get('HTTP_PASS')
    allowed_domains = ["confluence.verndale.com"]

    def __init__(self, url):
        super(TestspiderSpider, self).__init__()
        self.start_urls = [url]

    def parse(self, response):
        """Emit one follow-up request per non-header table row."""
        table = response.xpath(
            '//*[@id="main-content"]/div/div[4]/div/div/div[1]/table/tbody/tr'
        )
        for index, row in enumerate(table):
            testcase = TestCasesItem()
            if index > 0:
                # Fixed deprecated Selector.select -> .xpath.
                testcase['component'] = str(
                    row.xpath('.//td[2]/text() | .//td[2]/p/text()').extract_first()
                ).strip()
                yield Request(
                    self.start_urls[0],
                    callback=self.responsive_req,
                    errback=self.errback_httpbin,
                    dont_filter=True,
                    meta={'testcase': testcase, 'row': row},
                )

    def responsive_req(self, response):
        """Extract responsive-notes macros for the row carried in meta."""
        row = response.meta['row']
        testcase = response.meta['testcase']
        # Section Responsive Notes
        responsive_path = row.xpath(".//td[3]/div[contains(@class,'content-wrapper')]")
        path = ".//div[contains(@class,'confluence-information-macro confluence-information-macro-information conf-macro output-block')]"
        # If to see if the component has responsive requirements
        if responsive_path.xpath(path):
            for macro in responsive_path.xpath(path):
                devices = macro.xpath(".//div/p/span/text()").extract()
                # If to see if the responsive requirements has devices
                if devices:
                    for device in devices:
                        # Save Devices
                        responsive = Responsive()
                        responsive['device'] = str(device).strip(':')
                        yield Request(
                            self.start_urls[0],
                            callback=self.requirements,
                            errback=self.errback_httpbin,
                            dont_filter=True,
                            meta={
                                'responsive': responsive,
                                'row': row,
                                'testcase': testcase,
                            },
                        )
                else:
                    responsive = Responsive()
                    requirement_list = []
                    for text in macro.xpath(".//div/p/text()").extract():
                        # Bug fix: a single Requirements() was created outside
                        # this loop and mutated each iteration, so every list
                        # entry aliased the same item (all holding the last
                        # description). Create a fresh item per requirement.
                        requirement = Requirements()
                        requirement['description'] = text
                        requirement_list.append(requirement)
                    responsive['requirements'] = requirement_list
                    testcase['responsive'] = responsive
                    yield testcase
        else:
            yield testcase

    def requirements(self, response):
        """Attach (placeholder) requirements to the device's Responsive item.

        TODO(review): this callback sets a hard-coded "sample" value and never
        yields the testcase — the real extraction (walking the macro body text
        and filtering out the device label) is still to be implemented.
        """
        responsive = response.meta['responsive']
        testcase = response.meta['testcase']
        responsive['requirements'] = "sample"
        testcase['responsive'] = responsive

    # Function for handling Errors
    def errback_httpbin(self, failure):
        """Log request failures, distinguishing HTTP, DNS and timeout errors."""
        # log all failures
        self.logger.error(repr(failure))
        # in case you want to do something special for some errors,
        # you may need the failure's type:
        if failure.check(HttpError):
            # these exceptions come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            self.logger.error('HttpError on %s', response.url)
        elif failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            self.logger.error('DNSLookupError on %s', request.url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            request = failure.request
            self.logger.error('TimeoutError on %s', request.url)
/testcases/variables.py
# SECURITY NOTE(review): plaintext credentials committed to source control.
# Move these to environment variables or a secrets manager, and rotate the
# exposed password — anyone with repo access can read it.
USER='Esteban.Flores'
PASS='estebanFS10'
/utils/generateTC.py
from openpyxl import load_workbook
# import the pandas library and aliasing as pd and numpy as np
import pandas as pd
import numpy as np
import os


class createTestCase():
    """Turns the requirement rows of files/inputTC.xlsx into "Verify ..." and
    expected-result phrasing and writes resultsTC.csv next to this script."""

    def __init__(self):
        self.dir_path = os.path.dirname(os.path.realpath(__file__))
        # FIX: os.path.join keeps the workbook path portable; the original
        # hard-coded Windows '\\' separators.
        self.wb = load_workbook(os.path.join(self.dir_path, 'files', 'inputTC.xlsx'))
        self.ws = self.wb['Sheet1']
        # NOTE(review): requirements are matched word-by-word in createVfyLst,
        # so the multi-word entries ("important note:", "consists of:") can
        # never match -- confirm whether substring matching was intended.
        self.commonWords = ["note:", "notes:", "important note:", "onclick/ontap", "consists of:"]
        # Future-tense -> present-tense substitutions; only the first match
        # per row is applied (see createExpLst).
        self.changeWords = [
            {"from": "will be", "to": "is"},
            {"from": "will wrap", "to": "wraps"},
            {"from": "will not be", "to": "is not"},
            {"from": "will dissapear", "to": "dissapears"},
            {"from": "will have", "to": "has"},
            {"from": "will move up", "to": "moves up"},
            {"from": "will fall back", "to": "fallbacks"},
            {"from": "will never be", "to": "is never"},
            {"from": "if", "to": "when"}
        ]
        self.verifyLst = []
        self.expectedLst = []
        # Transform the ws into a pandas dataframe
        self.df = pd.DataFrame(self.ws.values)
        # replace None values with NA and drop them
        self.df = self.df.replace(to_replace='None', value=np.nan).dropna()
        header = self.df.iloc[0]
        self.df = self.df[1:]
        self.df = self.df.rename(columns=header)
        self.df = self.df.reset_index(drop=True)
        self.dfList = self.df[header].values

    def __main__(self):
        """Run both transformations and dump the result as CSV."""
        self.createVfyLst(self.dfList)
        self.createExpLst(self.dfList)
        self.df.to_csv(os.path.join(self.dir_path, 'resultsTC.csv'),
                       encoding='utf-8', index=False)

    def createVfyLst(self, dfList):
        """Prefix each requirement with "Verify " unless one of its words is a
        note keyword, then swap the new phrasing into the first column."""
        try:
            for req in dfList:
                req = str(req[0]).lower()
                # IDIOM: any() replaces the original band=0/1 flag loop.
                if any(word in self.commonWords for word in req.split(' ')):
                    # note-style rows keep their text, just capitalised
                    self.verifyLst.append(req.capitalize())
                else:
                    self.verifyLst.append("Verify " + req)
            # Find the name of the column by index
            replaceClmn = self.df.columns[0]
            # Drop that column
            self.df.drop(replaceClmn, axis=1, inplace=True)
            # Put whatever series you want in its place
            self.df[replaceClmn] = self.verifyLst
        except ValueError:
            print("There was a problem")

    def createExpLst(self, dfList):
        """Rewrite future-tense requirements into present-tense expected
        results and append the per-browser placeholder columns."""
        try:
            for req in dfList:
                req = str(req[0]).lower()
                # only the first matching substitution applies, as before
                for wordrplc in self.changeWords:
                    if wordrplc['from'] in req:
                        req = req.replace(wordrplc['from'], wordrplc['to'])
                        break
                self.expectedLst.append(str(req).capitalize())
            self.df['Expected'] = self.expectedLst
            # Adding columns wth -1 value for the excel testcase format
            browserList = [-1] * len(self.expectedLst)
            browserListNoApply = ['---'] * len(self.expectedLst)
            self.df['windowsIE'] = browserList
            self.df['windowsCH'] = browserList
            self.df['windowsFF'] = browserList
            self.df['macSF'] = browserListNoApply
            self.df['macCH'] = browserListNoApply
            self.df['macFF'] = browserListNoApply
            print("CSV file generated with success")
        except ValueError:
            print("There was a problem")


if __name__ == "__main__":
    app = createTestCase()
    app.__main__()
/utils/readFiles.py
import os import re path = os.chdir('C://Users//503025052//Documents//GE//GE TestCases') filenames = os.listdir(path) for index,filename in enumerate(filenames): try: extension = os.path.splitext(filename)[1][1:] if(extension=='xlsx'): number =re.findall(r'\d+', str(filename)) if(number[0]): taskName = filename.replace(number[0],'') taskName = taskName.replace(extension,'') taskName = taskName.replace('-','') taskName = taskName.replace('.','') taskName = taskName.replace('(QA)','') taskName = taskName.strip() numberJira = int(number[0])-3 print(str(index)+'|'+str(taskName)+'|https://jira.verndale.com/browse/GEHC-'+str(numberJira)) except IOError: print('Cant change %s' % (filename)) print("All Files have been updated")
/utils/readTestCases.py
from openpyxl import load_workbook
import re
import json


class readFile():
    """Reads a test-case workbook and dumps component/test-case/tag data
    to data.json in the current directory."""

    def __init__(self):
        # NOTE(review): hard-coded absolute, user-specific path -- consider
        # accepting it as a constructor argument.
        path = 'C:\\Users\\Esteban.Flores\\Documents\\1 Verndale\\2 Projects\\GE-GeneralElectric\\GE TestCases\\0942-(QA) Course Registration Module.xlsx'
        self.wb = load_workbook(path, data_only=True)
        # Phrases stripped/normalised out of cell text before use.
        self.cleanWords = [
            {"from": "Verify", "to": ""},
            {"from": ":", "to": ""},
            {"from": "On click", "to": "cta"},
            {"from": "On hover", "to": "cta"},
            {"from": "Component", "to": ""},
            {"from": "page displays accordingly in mobile", "to": "mobile/tablet"},
            {"from": "rtf (rich text format)", "to": "verify optional content managed rtf (rich text format)"},
        ]
        # Keyword -> tag classification; the LAST matching entry wins.
        self.tagWords = [
            {"has": "text", "tag": "text"},
            {"has": "hover", "tag": "cta"},
            {"has": "click", "tag": "cta"},
            {"has": "rtf", "tag": "text"},
            {"has": "link", "tag": "link"},
            {"has": "image", "tag": "image"},
        ]
        self.final = []

    def __main__(self):
        for sheet_name in self.wb.sheetnames:
            validSheet = re.compile('TC|Mobile')
            # a sheet is a real test case when its name contains TC or Mobile
            if bool(re.search(validSheet, sheet_name)):
                self.readCells(sheet_name)

    def readCells(self, sheet):
        """Collect the component name and every 'Verify ...' row of one sheet,
        then rewrite data.json with everything gathered so far."""
        item = {
            "component": "",
            "testcases": []
        }
        # Get Component Name of the sheet (row 1, column B)
        item['component'] = self.cleanCell(self.wb[str(sheet)].cell(row=1, column=2).value)
        # Make a list of all the description columns (column B, rows 13-149)
        data = [self.wb[str(sheet)].cell(row=i, column=2).value for i in range(13, 150)]
        counter = 0
        for cell in data:
            test = {}
            # IDIOM FIX: `is not None` instead of `!= None`.
            if cell is not None and 'Verify' in cell:
                # Get testcase of sheet
                test[str(counter)] = cell.lower()
                counter += 1
                # Get tag for each testcase (last match wins)
                for tag in self.tagWords:
                    if tag['has'] in cell:
                        test["tag"] = tag['tag']
                if item['component'] == 'mobile/tablet':
                    test["tag"] = 'mobile'
            if test:
                item["testcases"].append(test)
        self.final.append(item)
        with open('data.json', 'w') as outfile:
            json.dump(self.final, outfile)

    def cleanCell(self, cell):
        """Strip boilerplate phrases from a cell value and lower-case it.

        NOTE(review): assumes the cell value is a non-None string -- an empty
        header cell would raise AttributeError; confirm against the workbook.
        """
        for word in self.cleanWords:
            cell = cell.replace(word['from'], word['to'])
        cell = cell.lower()
        return cell.strip()


if (__name__ == "__main__"):
    app = readFile()
    app.__main__()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
RobMurray98/BribeNet
refs/heads/master
{"/src/BribeNet/graph/generation/generator.py": ["/src/BribeNet/graph/generation/__init__.py"], "/src/BribeNet/graph/generation/flatWeightGenerator.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/graph/generation/weightedGenerator.py"], "/src/BribeNet/graph/generation/unweightedGenerator.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/graph/generation/generator.py"], "/src/BribeNet/graph/generation/weightedGenerator.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/graph/generation/generator.py"], "/src/BribeNet/graph/ratingGraph.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/graph/generation/flatWeightGenerator.py", "/src/BribeNet/graph/generation/generator.py", "/src/BribeNet/graph/ratingMethod.py", "/src/BribeNet/helpers/bribeNetException.py", "/src/BribeNet/bribery/briber.py"], "/src/BribeNet/gui/apps/static/static.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/gui/apps/static/graph.py", "/src/BribeNet/gui/apps/static/wizard/wizard.py", "/src/BribeNet/graph/generation/flatWeightGenerator.py", "/src/BribeNet/bribery/static/oneMoveRandomBriber.py", "/src/BribeNet/bribery/static/oneMoveInfluentialNodeBriber.py", "/src/BribeNet/gui/apps/static/result.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/src/BribeNet/gui/apps/temporal/main.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/gui/apps/static/wizard/algos/barabasi_albert.py", "/src/BribeNet/gui/apps/static/wizard/algos/composite.py", "/src/BribeNet/bribery/temporal/oneMoveEvenBriber.py", "/src/BribeNet/gui/apps/temporal/result.py", "/src/BribeNet/graph/generation/flatWeightGenerator.py", "/src/BribeNet/gui/apps/temporal/graph.py", "/src/BribeNet/gui/apps/temporal/briber_wizard/strategies/p_greedy.py", "/src/BribeNet/graph/temporal/action/actionType.py", "/src/BribeNet/gui/apps/temporal/results_wizard/results.py", "/src/BribeNet/gui/apps/temporal/wizard/wizard.py", 
"/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/bribery/temporal/pGreedyBriber.py", "/src/BribeNet/graph/temporal/thresholdGraph.py", "/src/BribeNet/bribery/temporal/mostInfluentialNodeBriber.py"], "/test/BribeNet/graph/generation/test_unweightedGenerator.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/graph/generation/unweightedGenerator.py"], "/test/BribeNet/graph/generation/test_weightedGenerator.py": ["/src/BribeNet/graph/generation/__init__.py", "/src/BribeNet/graph/generation/flatWeightGenerator.py"], "/src/BribeNet/gui/apps/static/wizard/generation.py": ["/src/BribeNet/gui/apps/static/wizard/algos/barabasi_albert.py", "/src/BribeNet/gui/apps/static/wizard/algos/composite.py", "/src/BribeNet/gui/apps/static/wizard/algos/watts_strogatz.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_method.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/weighted_p_rating.py", "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/o_rating.py", "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/p_gamma_rating.py", "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/median_p_rating.py", "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/p_rating.py", "/src/BribeNet/graph/ratingMethod.py", "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/weighted_median_p_rating.py"], "/src/BribeNet/gui/apps/temporal/wizard/bribers.py": ["/src/BribeNet/gui/apps/temporal/briber_wizard/window.py"], "/src/BribeNet/graph/temporal/ratingGraph.py": ["/src/BribeNet/graph/temporal/weighting/traverseWeighting.py", "/src/BribeNet/bribery/temporal/action/briberyAction.py", "/src/BribeNet/graph/temporal/action/customerAction.py", "/src/BribeNet/bribery/temporal/action/multiBriberyAction.py", "/src/BribeNet/graph/ratingGraph.py", "/src/BribeNet/bribery/temporal/briber.py", "/src/BribeNet/helpers/bribeNetException.py"], "/test/BribeNet/bribery/temporal/test_randomBriber.py": ["/src/BribeNet/bribery/temporal/randomBriber.py", 
"/test/BribeNet/bribery/temporal/briberTestCase.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/test/BribeNet/graph/temporal/test_multiBriberRatingGraph.py": ["/src/BribeNet/bribery/temporal/randomBriber.py", "/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/src/BribeNet/bribery/temporal/action/multiBriberyAction.py": ["/src/BribeNet/bribery/temporal/action/briberyAction.py", "/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/action/__init__.py", "/src/BribeNet/graph/temporal/ratingGraph.py", "/src/BribeNet/bribery/temporal/briber.py", "/src/BribeNet/helpers/bribeNetException.py"], "/src/BribeNet/bribery/temporal/action/singleBriberyAction.py": ["/src/BribeNet/bribery/temporal/action/briberyAction.py", "/src/BribeNet/bribery/temporal/action/__init__.py", "/src/BribeNet/graph/temporal/ratingGraph.py", "/src/BribeNet/bribery/temporal/briber.py"], "/src/BribeNet/graph/temporal/action/customerAction.py": ["/src/BribeNet/bribery/temporal/action/briberyAction.py", "/src/BribeNet/graph/temporal/action/actionType.py", "/src/BribeNet/graph/temporal/ratingGraph.py", "/src/BribeNet/bribery/temporal/briber.py", "/src/BribeNet/helpers/bribeNetException.py"], "/test/BribeNet/bribery/temporal/action/test_briberyAction.py": ["/src/BribeNet/bribery/temporal/action/briberyAction.py", "/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/src/BribeNet/gui/apps/temporal/wizard/wizard.py": ["/src/BribeNet/gui/apps/temporal/wizard/bribers.py", "/src/BribeNet/gui/apps/temporal/wizard/generation.py", "/src/BribeNet/graph/ratingMethod.py", "/src/BribeNet/gui/apps/temporal/wizard/settings.py", "/src/BribeNet/gui/apps/temporal/wizard/rating_method.py", "/src/BribeNet/helpers/bribeNetException.py"], "/src/BribeNet/bribery/temporal/briber.py": 
["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/graph/temporal/ratingGraph.py", "/src/BribeNet/helpers/bribeNetException.py", "/src/BribeNet/bribery/briber.py"], "/src/BribeNet/bribery/temporal/mostInfluentialNodeBriber.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/briber.py"], "/src/BribeNet/bribery/temporal/nonBriber.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/briber.py"], "/src/BribeNet/bribery/temporal/oneMoveEvenBriber.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/briber.py"], "/src/BribeNet/bribery/temporal/pGreedyBriber.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/briber.py"], "/src/BribeNet/bribery/temporal/randomBriber.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/briber.py"], "/test/BribeNet/bribery/temporal/action/test_multiBriberyAction.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/action/multiBriberyAction.py", "/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/test/BribeNet/bribery/temporal/action/test_singleBriberyAction.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/test/BribeNet/graph/temporal/action/test_customerAction.py": ["/src/BribeNet/bribery/temporal/action/singleBriberyAction.py", "/src/BribeNet/graph/temporal/action/customerAction.py", "/src/BribeNet/graph/temporal/action/actionType.py", "/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/src/BribeNet/gui/apps/temporal/graph.py": 
["/src/BribeNet/gui/apps/temporal/results_wizard/window.py"], "/src/BribeNet/gui/apps/temporal/result.py": ["/src/BribeNet/gui/apps/temporal/results_wizard/window.py"], "/test/BribeNet/bribery/temporal/test_budgetBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py", "/src/BribeNet/bribery/temporal/mostInfluentialNodeBriber.py"], "/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py": ["/test/BribeNet/bribery/temporal/briberTestCase.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/src/BribeNet/gui/main.py": ["/src/BribeNet/gui/apps/temporal/main.py", "/src/BribeNet/gui/apps/main.py", "/src/BribeNet/gui/apps/static/static.py"], "/src/docker_main.py": ["/src/BribeNet/gui/main.py"], "/src/BribeNet/graph/static/ratingGraphBuilder.py": ["/src/BribeNet/bribery/static/influentialNodeBriber.py", "/src/BribeNet/bribery/static/oneMoveRandomBriber.py", "/src/BribeNet/bribery/static/randomBriber.py", "/src/BribeNet/bribery/static/oneMoveInfluentialNodeBriber.py", "/src/BribeNet/bribery/static/nonBriber.py", "/src/BribeNet/graph/static/ratingGraph.py", "/src/BribeNet/bribery/briber.py"], "/test/BribeNet/graph/static/test_ratingGraphBuilder.py": ["/src/BribeNet/bribery/static/influentialNodeBriber.py", "/src/BribeNet/bribery/static/oneMoveRandomBriber.py", "/src/BribeNet/bribery/static/randomBriber.py", "/src/BribeNet/bribery/static/oneMoveInfluentialNodeBriber.py", "/src/BribeNet/graph/static/ratingGraphBuilder.py", "/src/BribeNet/bribery/static/nonBriber.py"], "/test/BribeNet/prediction/test_parameterPrediction.py": ["/src/BribeNet/prediction/parameterPrediction.py"], "/src/BribeNet/graph/temporal/noCustomerActionGraph.py": ["/src/BribeNet/graph/temporal/action/customerAction.py", 
"/src/BribeNet/graph/temporal/ratingGraph.py"], "/src/BribeNet/graph/temporal/thresholdGraph.py": ["/src/BribeNet/graph/temporal/action/customerAction.py", "/src/BribeNet/graph/temporal/action/actionType.py", "/src/BribeNet/graph/temporal/ratingGraph.py"], "/src/BribeNet/gui/apps/static/wizard/algos/barabasi_albert.py": ["/src/BribeNet/gui/classes/param_list_frame.py"], "/src/BribeNet/gui/apps/static/wizard/algos/composite.py": ["/src/BribeNet/gui/classes/param_list_frame.py"], "/src/BribeNet/gui/apps/static/wizard/algos/watts_strogatz.py": ["/src/BribeNet/gui/classes/param_list_frame.py"], "/src/BribeNet/gui/apps/temporal/briber_wizard/strategies/p_greedy.py": ["/src/BribeNet/gui/classes/param_list_frame.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py": ["/src/BribeNet/gui/classes/param_list_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/src/BribeNet/gui/apps/temporal/wizard/settings.py": ["/src/BribeNet/gui/classes/param_list_frame.py"], "/src/BribeNet/gui/classes/param_list_frame.py": ["/src/BribeNet/gui/classes/tooltip.py"], "/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py": ["/src/BribeNet/bribery/static/oneMoveRandomBriber.py", "/test/BribeNet/bribery/static/briberTestCase.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/test/BribeNet/bribery/static/test_randomBriber.py": ["/src/BribeNet/bribery/static/randomBriber.py", "/test/BribeNet/bribery/static/briberTestCase.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/test/BribeNet/graph/static/test_multiBriberRatingGraph.py": ["/src/BribeNet/bribery/static/randomBriber.py", "/src/BribeNet/bribery/static/nonBriber.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py": ["/src/BribeNet/bribery/static/oneMoveInfluentialNodeBriber.py", "/test/BribeNet/bribery/static/briberTestCase.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/src/BribeNet/gui/apps/temporal/briber_wizard/frame.py": 
["/src/BribeNet/gui/apps/temporal/briber_wizard/strategies/p_greedy.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/median_p_rating.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/o_rating.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/p_gamma_rating.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/p_rating.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/weighted_median_p_rating.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/src/BribeNet/gui/apps/temporal/wizard/rating_methods/weighted_p_rating.py": ["/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py", "/src/BribeNet/graph/ratingMethod.py"], "/test/BribeNet/graph/temporal/test_thresholdGraph.py": ["/src/BribeNet/graph/temporal/action/actionType.py", "/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/thresholdGraph.py"], "/src/BribeNet/bribery/briber.py": ["/src/BribeNet/graph/ratingGraph.py", "/src/BribeNet/helpers/bribeNetException.py"], "/src/BribeNet/graph/static/ratingGraph.py": ["/src/BribeNet/graph/ratingGraph.py", "/src/BribeNet/bribery/static/briber.py", "/src/BribeNet/helpers/bribeNetException.py"], "/src/BribeNet/bribery/static/influentialNodeBriber.py": ["/src/BribeNet/bribery/static/briber.py"], "/src/BribeNet/bribery/static/nonBriber.py": ["/src/BribeNet/bribery/static/briber.py"], 
"/src/BribeNet/bribery/static/oneMoveInfluentialNodeBriber.py": ["/src/BribeNet/bribery/static/briber.py", "/src/BribeNet/bribery/briber.py"], "/src/BribeNet/bribery/static/oneMoveRandomBriber.py": ["/src/BribeNet/bribery/static/briber.py"], "/src/BribeNet/bribery/static/randomBriber.py": ["/src/BribeNet/bribery/static/briber.py"], "/src/BribeNet/bribery/temporal/action/briberyAction.py": ["/src/BribeNet/graph/temporal/ratingGraph.py", "/src/BribeNet/bribery/temporal/briber.py", "/src/BribeNet/helpers/bribeNetException.py"], "/test/BribeNet/bribery/temporal/briberTestCase.py": ["/src/BribeNet/bribery/temporal/nonBriber.py", "/src/BribeNet/graph/temporal/noCustomerActionGraph.py"], "/test/BribeNet/bribery/static/briberTestCase.py": ["/src/BribeNet/bribery/static/nonBriber.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/test/BribeNet/bribery/test_briber.py": ["/src/BribeNet/bribery/static/nonBriber.py", "/test/BribeNet/bribery/static/briberTestCase.py", "/src/BribeNet/bribery/briber.py"], "/test/BribeNet/graph/static/test_singleBriberRatingGraph.py": ["/src/BribeNet/bribery/static/nonBriber.py", "/src/BribeNet/graph/static/ratingGraph.py"], "/src/BribeNet/gui/apps/temporal/briber_wizard/window.py": ["/src/BribeNet/gui/apps/temporal/briber_wizard/frame.py"], "/src/BribeNet/gui/apps/temporal/wizard/generation.py": ["/src/BribeNet/gui/apps/static/wizard/generation.py"], "/src/BribeNet/gui/apps/temporal/results_wizard/window.py": ["/src/BribeNet/gui/apps/temporal/results_wizard/frame.py"], "/src/BribeNet/graph/generation/__init__.py": ["/src/BribeNet/graph/generation/algo/compositeGenerator.py", "/src/BribeNet/helpers/bribeNetException.py"], "/src/BribeNet/bribery/static/briber.py": ["/src/BribeNet/helpers/bribeNetException.py", "/src/BribeNet/graph/static/ratingGraph.py", "/src/BribeNet/bribery/briber.py"], "/src/BribeNet/bribery/temporal/action/__init__.py": ["/src/BribeNet/helpers/bribeNetException.py"]}
└── ├── setup.py ├── src │ ├── BribeNet │ │ ├── bribery │ │ │ ├── briber.py │ │ │ ├── static │ │ │ │ ├── briber.py │ │ │ │ ├── influentialNodeBriber.py │ │ │ │ ├── nonBriber.py │ │ │ │ ├── oneMoveInfluentialNodeBriber.py │ │ │ │ ├── oneMoveRandomBriber.py │ │ │ │ └── randomBriber.py │ │ │ └── temporal │ │ │ ├── action │ │ │ │ ├── __init__.py │ │ │ │ ├── briberyAction.py │ │ │ │ ├── multiBriberyAction.py │ │ │ │ └── singleBriberyAction.py │ │ │ ├── briber.py │ │ │ ├── mostInfluentialNodeBriber.py │ │ │ ├── nonBriber.py │ │ │ ├── oneMoveEvenBriber.py │ │ │ ├── pGreedyBriber.py │ │ │ └── randomBriber.py │ │ ├── graph │ │ │ ├── generation │ │ │ │ ├── __init__.py │ │ │ │ ├── algo │ │ │ │ │ └── compositeGenerator.py │ │ │ │ ├── flatWeightGenerator.py │ │ │ │ ├── generator.py │ │ │ │ ├── unweightedGenerator.py │ │ │ │ └── weightedGenerator.py │ │ │ ├── ratingGraph.py │ │ │ ├── ratingMethod.py │ │ │ ├── static │ │ │ │ ├── ratingGraph.py │ │ │ │ └── ratingGraphBuilder.py │ │ │ └── temporal │ │ │ ├── action │ │ │ │ ├── actionType.py │ │ │ │ └── customerAction.py │ │ │ ├── noCustomerActionGraph.py │ │ │ ├── ratingGraph.py │ │ │ ├── thresholdGraph.py │ │ │ └── weighting │ │ │ ├── communityWeighting.py │ │ │ └── traverseWeighting.py │ │ ├── gui │ │ │ ├── apps │ │ │ │ ├── main.py │ │ │ │ ├── static │ │ │ │ │ ├── briber_wizard │ │ │ │ │ │ ├── frame.py │ │ │ │ │ │ └── window.py │ │ │ │ │ ├── graph.py │ │ │ │ │ ├── result.py │ │ │ │ │ ├── static.py │ │ │ │ │ └── wizard │ │ │ │ │ ├── algos │ │ │ │ │ │ ├── barabasi_albert.py │ │ │ │ │ │ ├── composite.py │ │ │ │ │ │ └── watts_strogatz.py │ │ │ │ │ ├── generation.py │ │ │ │ │ └── wizard.py │ │ │ │ └── temporal │ │ │ │ ├── briber_wizard │ │ │ │ │ ├── frame.py │ │ │ │ │ ├── strategies │ │ │ │ │ │ └── p_greedy.py │ │ │ │ │ └── window.py │ │ │ │ ├── graph.py │ │ │ │ ├── main.py │ │ │ │ ├── result.py │ │ │ │ ├── results_wizard │ │ │ │ │ ├── frame.py │ │ │ │ │ ├── results.py │ │ │ │ │ └── window.py │ │ │ │ └── wizard │ │ │ │ ├── bribers.py │ 
│ │ │ ├── generation.py │ │ │ │ ├── rating_method.py │ │ │ │ ├── rating_methods │ │ │ │ │ ├── median_p_rating.py │ │ │ │ │ ├── o_rating.py │ │ │ │ │ ├── p_gamma_rating.py │ │ │ │ │ ├── p_rating.py │ │ │ │ │ ├── rating_method_frame.py │ │ │ │ │ ├── weighted_median_p_rating.py │ │ │ │ │ └── weighted_p_rating.py │ │ │ │ ├── settings.py │ │ │ │ └── wizard.py │ │ │ ├── classes │ │ │ │ ├── param_list_frame.py │ │ │ │ └── tooltip.py │ │ │ └── main.py │ │ ├── helpers │ │ │ └── bribeNetException.py │ │ └── prediction │ │ └── parameterPrediction.py │ └── docker_main.py └── test └── BribeNet ├── bribery │ ├── static │ │ ├── briberTestCase.py │ │ ├── test_oneMoveInfluentialNodeBriber.py │ │ ├── test_oneMoveRandomBriber.py │ │ └── test_randomBriber.py │ ├── temporal │ │ ├── action │ │ │ ├── test_briberyAction.py │ │ │ ├── test_multiBriberyAction.py │ │ │ └── test_singleBriberyAction.py │ │ ├── briberTestCase.py │ │ ├── test_budgetBriber.py │ │ ├── test_mostInfluentialBriber.py │ │ ├── test_oneMoveRandomBriber.py │ │ └── test_randomBriber.py │ └── test_briber.py ├── graph │ ├── generation │ │ ├── test_unweightedGenerator.py │ │ └── test_weightedGenerator.py │ ├── static │ │ ├── test_multiBriberRatingGraph.py │ │ ├── test_ratingGraphBuilder.py │ │ └── test_singleBriberRatingGraph.py │ ├── temporal │ │ ├── action │ │ │ └── test_customerAction.py │ │ ├── test_multiBriberRatingGraph.py │ │ ├── test_ratingGraphBuilder.py │ │ └── test_thresholdGraph.py │ └── test_ratingGraph.py └── prediction └── test_parameterPrediction.py
/setup.py
import setuptools with open("README.md", "r") as fh: long_description = fh.read() setuptools.setup( name="BribeNet", version="1.0.0", author="Robert Murray", author_email="R.Murray.1@warwick.ac.uk", description="Simulation of networks of bribers and consumers", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/RobMurray98/CS407Implementation", install_requires=[ 'matplotlib==3.1.2', 'networkit==6.1.0', 'networkx==2.4', 'snap==0.5', 'cython==0.29.14', 'numpy==1.17.4', 'pandas==0.25.3', 'pytest==5.3.0', 'ipython==7.13.0', 'pillow==7.0.0', 'weightedstats==0.4.1' ], package_data={'': ['*.png']}, include_package_data=True, package_dir={'': 'src'}, packages=setuptools.find_packages(where='src'), python_requires='>=3.7' )
/src/BribeNet/bribery/briber.py
from abc import ABC from typing import Optional from BribeNet.helpers.bribeNetException import BribeNetException class BriberyGraphNotSetException(BribeNetException): pass class BriberyGraphAlreadySetException(BribeNetException): pass class BriberNotRegisteredOnGraphException(BribeNetException): pass class GraphNotSubclassOfRatingGraphException(BribeNetException): pass class Briber(ABC): def __init__(self, u0: float): """ Abstract class for bribing actors :param u0: the initial utility available to the briber """ self._u = u0 from BribeNet.graph.ratingGraph import RatingGraph self._g: Optional[RatingGraph] = None def _set_graph(self, g): from BribeNet.graph.ratingGraph import RatingGraph if not issubclass(g.__class__, RatingGraph): raise GraphNotSubclassOfRatingGraphException(f"{g.__class__.__name__} is not a subclass of RatingGraph") if self._g is not None: raise BriberyGraphAlreadySetException() self._g = g def get_graph(self): return self._g def get_briber_id(self): if self._g is None: raise BriberyGraphNotSetException() g_bribers = self._g.get_bribers() if issubclass(g_bribers.__class__, Briber): return 0 for i, briber in enumerate(g_bribers): if briber is self: return i raise BriberNotRegisteredOnGraphException() def set_resources(self, u: float): self._u = u def add_resources(self, u: float): self._u += u def get_resources(self) -> float: return self._u def bribe(self, node_id: int, amount: float): if self._g is None: raise BriberyGraphNotSetException() if amount <= self._u: self._g.bribe(node_id, amount, self.get_briber_id()) self._u -= amount
/src/BribeNet/bribery/static/briber.py
from abc import ABC, abstractmethod from BribeNet.bribery.briber import Briber, BriberyGraphNotSetException from BribeNet.helpers.bribeNetException import BribeNetException class GraphNotSubclassOfStaticRatingGraphException(BribeNetException): pass class StaticBriber(Briber, ABC): """ Static bribers perform static bribery actions instantaneously on StaticRatingGraphs The abstract method next_bribe must be implemented to define the bribery action of the briber """ def __init__(self, u0: float): super().__init__(u0=u0) def _set_graph(self, g): from BribeNet.graph.static.ratingGraph import StaticRatingGraph if not issubclass(g.__class__, StaticRatingGraph): raise GraphNotSubclassOfStaticRatingGraphException(f"{g.__class__.__name__} is not a subclass of " "StaticRatingGraph") super()._set_graph(g) @abstractmethod def _next_bribe(self): """ Statically perform some bribery action on the graph """ raise NotImplementedError def next_bribe(self): if self.get_graph() is None: raise BriberyGraphNotSetException() self._next_bribe()
/src/BribeNet/bribery/static/influentialNodeBriber.py
from BribeNet.bribery.static.briber import StaticBriber from BribeNet.helpers.override import override class InfluentialNodeBriber(StaticBriber): def __init__(self, u0, k=0.1): super().__init__(u0) self._k = k # will be reassigned when graph set @override def _set_graph(self, g): super()._set_graph(g) # Make sure that k is set such that there are enough resources left to actually bribe people. self._k = min(self._k, 0.5 * (self.get_resources() / self.get_graph().customer_count())) def _next_bribe(self): for c in self.get_graph().get_customers(): reward = self.get_graph().is_influential(c, k=self._k, briber_id=self.get_briber_id()) if reward > 0: # max out customers rating self.bribe(c, self.get_graph().get_max_rating() - self.get_graph().get_vote(c))
/src/BribeNet/bribery/static/nonBriber.py
from BribeNet.bribery.static.briber import StaticBriber # performs no bribery class NonBriber(StaticBriber): def _next_bribe(self): pass
/src/BribeNet/bribery/static/oneMoveInfluentialNodeBriber.py
from BribeNet.bribery.briber import BriberyGraphNotSetException from BribeNet.bribery.static.briber import StaticBriber from BribeNet.helpers.override import override class OneMoveInfluentialNodeBriber(StaticBriber): def __init__(self, u0, k=0.1): super().__init__(u0) self.influencers = [] self._k = k # will be reassigned when graph set @override def _set_graph(self, g): super()._set_graph(g) # Make sure that k is set such that there are enough resources left to actually bribe people. self._k = min(self._k, 0.5 * (self.get_resources() / self.get_graph().customer_count())) # sets influencers to ordered list of most influential nodes def _get_influencers(self): if self.get_graph() is None: raise BriberyGraphNotSetException() self.influencers = [] for c in self.get_graph().get_customers(): reward = self.get_graph().is_influential(c, k=self._k, briber_id=self.get_briber_id()) if reward > 0: self.influencers.append((reward, c)) # Sort based on highest reward self.influencers = sorted(self.influencers, reverse=True) # returns node bribed number def _next_bribe(self): if self.get_graph() is None: raise BriberyGraphNotSetException() self.influencers = self._get_influencers() if self.influencers: (r, c) = self.influencers[0] self.bribe(c, self.get_graph().get_max_rating() - self.get_graph().get_vote(c)) return c else: return 0
/src/BribeNet/bribery/static/oneMoveRandomBriber.py
import random

import numpy as np

from BribeNet.bribery.static.briber import StaticBriber


class OneMoveRandomBriber(StaticBriber):
    """Static briber that pushes one randomly chosen customer to the top rating."""

    def _next_bribe(self):
        """
        Bribe a single random customer up to the maximum rating.
        :return: the id of the customer that was bribed
        """
        graph = self.get_graph()
        # pick random customer from list
        target = random.choice(graph.get_customers())
        ceiling = graph.get_max_rating()
        current = graph.get_vote(target)[self.get_briber_id()]
        # A NaN vote means the customer has not voted yet, so pay the full ceiling.
        amount = ceiling if np.isnan(current) else ceiling - current
        self.bribe(target, amount)
        return target
/src/BribeNet/bribery/static/randomBriber.py
import random

from BribeNet.bribery.static.briber import StaticBriber

DELTA = 0.001  # ensures total bribes do not exceed budget


class RandomBriber(StaticBriber):
    """Static briber that spreads (almost) its whole budget randomly over all customers."""

    def _next_bribe(self):
        """
        Give every customer a random share of the available resources.

        BUG FIX: bribes were previously indexed positionally (bribes[i] for
        customer id i), which silently breaks if customer ids are not exactly
        0..n-1; customers and amounts are now paired explicitly with zip.
        An empty customer list is also handled (previously a ZeroDivisionError).
        """
        customers = self.get_graph().get_customers()
        if not customers:
            return  # nothing to bribe; avoids division by zero below
        # array of random bribes, normalised so their total is (resources - DELTA)
        bribes = [random.uniform(0.0, 1.0) for _ in customers]
        total = sum(bribes)
        bribes = [b * (self.get_resources() - DELTA) / total for b in bribes]
        # enact bribes
        for customer, amount in zip(customers, bribes):
            self.bribe(customer, amount)
/src/BribeNet/bribery/temporal/action/__init__.py
from BribeNet.helpers.bribeNetException import BribeNetException


class BribeMustBeGreaterThanZeroException(BribeNetException):
    """Raised when a bribery action is given a non-positive bribe amount."""
    pass


class NodeDoesNotExistException(BribeNetException):
    """Raised when the targeted node id is not a customer of the graph."""
    pass


class BriberDoesNotExistException(BribeNetException):
    """Raised when a briber id does not correspond to a briber on the graph."""
    pass


class BriberyActionExceedsAvailableUtilityException(BribeNetException):
    """Raised when the total bribes in an action exceed the briber's resources."""
    pass
/src/BribeNet/bribery/temporal/action/briberyAction.py
from abc import ABC, abstractmethod
from typing import List

from BribeNet.helpers.bribeNetException import BribeNetException


class BriberyActionExecutedMultipleTimesException(BribeNetException):
    """Raised when perform_action is called more than once on the same action."""
    pass


class BriberyActionTimeNotCorrectException(BribeNetException):
    """Raised when an action is performed at a graph time step other than the one it was created at."""
    pass


class BriberyAction(ABC):
    """Abstract base for a set of bribes performed atomically at one graph time step."""

    def __init__(self, graph):
        """
        :param graph: the TemporalRatingGraph the bribes will apply to
        :raises GraphNotSubclassOfTemporalRatingGraphException: if graph is not temporal
        """
        from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph  # local import to remove cyclic dependency
        from BribeNet.bribery.temporal.briber import GraphNotSubclassOfTemporalRatingGraphException
        if not issubclass(graph.__class__, TemporalRatingGraph):
            raise GraphNotSubclassOfTemporalRatingGraphException(f"{graph.__class__.__name__} is not a subclass of "
                                                                 "TemporalRatingGraph")
        self.graph = graph
        # The action is pinned to the graph's time step at construction;
        # perform_action later refuses to run at any other step.
        self.__time_step = self.graph.get_time_step()
        self.__performed = False

    @classmethod
    @abstractmethod
    def empty_action(cls, graph):
        """Construct an action containing no bribes."""
        raise NotImplementedError

    def perform_action(self):
        """
        Perform the action safely
        :raises BriberyActionTimeNotCorrectException: if action not at same time step as graph
        :raises BriberyActionExecutedMultipleTimesException: if action already executed
        """
        if not self.__performed:
            if self.__time_step == self.graph.get_time_step():
                self._perform_action()
                self.__performed = True
            else:
                message = f"The time step of the TemporalRatingGraph ({self.graph.get_time_step()}) is not equal to " \
                          f"the intended execution time ({self.__time_step})"
                raise BriberyActionTimeNotCorrectException(message)
        else:
            raise BriberyActionExecutedMultipleTimesException()

    def get_time_step(self):
        """Return the graph time step this action was created at."""
        return self.__time_step

    def get_performed(self):
        """Return whether this action has already been executed."""
        return self.__performed

    @abstractmethod
    def _perform_action(self):
        """
        Perform the stored bribery actions simultaneously
        """
        raise NotImplementedError

    @abstractmethod
    def is_bribed(self, node_id) -> (bool, List[int]):
        """
        Determine if the bribery action results in a node being bribed this time step
        :param node_id: the node
        :return: whether the node is bribed this time step
        """
        raise NotImplementedError
/src/BribeNet/bribery/temporal/action/multiBriberyAction.py
import sys
from typing import Dict, Optional, List

from BribeNet.bribery.temporal.action import BribeMustBeGreaterThanZeroException, NodeDoesNotExistException, \
    BriberDoesNotExistException, BriberyActionExceedsAvailableUtilityException
from BribeNet.bribery.temporal.action.briberyAction import BriberyAction
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import GraphNotSubclassOfTemporalRatingGraphException
from BribeNet.helpers.bribeNetException import BribeNetException


class NoActionsToFormMultiActionException(BribeNetException):
    """Raised when an empty list of single actions is merged into a multi action."""
    pass


class BriberyActionsOnDifferentGraphsException(BribeNetException):
    """Raised when merged single actions do not all share the same graph."""
    pass


class BriberyActionsAtDifferentTimesException(BribeNetException):
    """Raised when merged single actions were created at different time steps."""
    pass


class MultiBriberyAction(BriberyAction):
    """A simultaneous set of bribes from several bribers at a single time step."""

    def __init__(self, graph, bribes: Optional[Dict[int, Dict[int, float]]] = None):
        """
        :param graph: the temporal rating graph the bribes apply to
        :param bribes: mapping briber_id -> (node_id -> bribe amount), or None for no bribes
        :raises BribeMustBeGreaterThanZeroException: if any supplied amount is negative
        """
        from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph
        if not issubclass(graph.__class__, TemporalRatingGraph):
            raise GraphNotSubclassOfTemporalRatingGraphException(f"{graph.__class__.__name__} is not a subclass of "
                                                                 "TemporalRatingGraph")
        super().__init__(graph=graph)
        if bribes is not None:
            # Only negative amounts are rejected here; add_bribe is stricter (rejects zero too).
            for _, bribe in bribes.items():
                for _, value in bribe.items():
                    if value < 0:
                        raise BribeMustBeGreaterThanZeroException()
        self._bribes: Dict[int, Dict[int, float]] = bribes or {}

    @classmethod
    def empty_action(cls, graph):
        """Construct a multi action containing no bribes."""
        return cls(graph, None)

    @classmethod
    def make_multi_action_from_single_actions(cls, actions: List[SingleBriberyAction]):
        """
        Merge one SingleBriberyAction per briber into one simultaneous multi action.
        :param actions: the single-briber actions to merge
        :raises NoActionsToFormMultiActionException: if actions is empty
        :raises BriberyActionsOnDifferentGraphsException: if actions span several graphs
        :raises BriberyActionsAtDifferentTimesException: if actions span several time steps
        :return: the merged MultiBriberyAction
        """
        if not actions:
            raise NoActionsToFormMultiActionException()
        graph = actions[0].briber.get_graph()
        if not all(b.briber.get_graph() is graph for b in actions):
            raise BriberyActionsOnDifferentGraphsException()
        time_step = actions[0].get_time_step()
        if not all(b.get_time_step() == time_step for b in actions):
            raise BriberyActionsAtDifferentTimesException()
        return cls(graph=graph, bribes={b.briber.get_briber_id(): b.get_bribes() for b in actions})

    def add_bribe(self, briber_id: int, node_id: int, bribe: float):
        """
        Register one bribe, combining with any existing bribe on the same node.
        :param briber_id: the briber paying the bribe
        :param node_id: the customer to bribe
        :param bribe: the (strictly positive) amount
        :raises BribeMustBeGreaterThanZeroException: if bribe is not strictly positive
        :raises NodeDoesNotExistException: if node_id is not a customer of the graph
        :raises BriberDoesNotExistException: if briber_id is out of range
        """
        if bribe <= 0:
            raise BribeMustBeGreaterThanZeroException()
        if node_id not in self.graph.get_customers():
            raise NodeDoesNotExistException()
        if briber_id not in range(len(self.graph.get_bribers())):
            raise BriberDoesNotExistException()
        if briber_id in self._bribes:
            if node_id in self._bribes[briber_id]:
                print("WARNING: node bribed twice in single time step, combining...", file=sys.stderr)
                self._bribes[briber_id][node_id] += bribe
            else:
                self._bribes[briber_id][node_id] = bribe
        else:
            self._bribes[briber_id] = {node_id: bribe}

    def _perform_action(self):
        """
        Perform the stored bribery actions simultaneously.
        :raises BriberyActionExceedsAvailableUtilityException: if any briber's total exceeds its resources
        """
        bribers = self.graph.get_bribers()
        # Validate every briber's budget before enacting anything, so the action is all-or-nothing.
        for briber_id, bribe in self._bribes.items():
            total_bribe_quantity = sum(bribe.values())
            if total_bribe_quantity > bribers[briber_id].get_resources():
                message = f"MultiBriberyAction exceeded resources available to briber {briber_id}: " \
                          f"{str(bribers[briber_id])} - {total_bribe_quantity} > {bribers[briber_id].get_resources()}"
                raise BriberyActionExceedsAvailableUtilityException(message)
        for briber_id, bribe in self._bribes.items():
            for customer, value in bribe.items():
                bribers[briber_id].bribe(node_id=customer, amount=value)

    def is_bribed(self, node_id):
        """
        Determine which bribers bribe a node in this action.
        :param node_id: the node
        :return: (whether the node is bribed, list of briber ids bribing it)
        """
        bribers = []
        for briber_id in self._bribes:
            if node_id in self._bribes[briber_id]:
                bribers.append(briber_id)
        if not bribers:
            return False, bribers
        return True, bribers

    def get_bribes(self):
        """Return the mapping briber_id -> (node_id -> bribe amount)."""
        return self._bribes
/src/BribeNet/bribery/temporal/action/singleBriberyAction.py
import sys
from typing import Dict, Optional

from BribeNet.bribery.temporal.action import BribeMustBeGreaterThanZeroException, NodeDoesNotExistException, \
    BriberyActionExceedsAvailableUtilityException
from BribeNet.bribery.temporal.action.briberyAction import BriberyAction


class SingleBriberyAction(BriberyAction):
    """The set of bribes one briber performs at a single time step."""

    def __init__(self, briber, bribes: Optional[Dict[int, float]] = None):
        """
        :param briber: the TemporalBriber performing the bribes
        :param bribes: mapping node_id -> bribe amount, or None for no bribes
        :raises BriberNotSubclassOfTemporalBriberException: if briber is not temporal
        :raises BribeMustBeGreaterThanZeroException: if any supplied amount is negative
        """
        from BribeNet.bribery.temporal.briber import TemporalBriber
        from BribeNet.graph.temporal.ratingGraph import BriberNotSubclassOfTemporalBriberException
        if not issubclass(briber.__class__, TemporalBriber):
            raise BriberNotSubclassOfTemporalBriberException(f"{briber.__class__.__name__} is not a subclass of "
                                                             "TemporalBriber")
        super().__init__(graph=briber.get_graph())
        if bribes is not None:
            # NOTE(review): rejects only negative amounts, while MultiBriberyAction.add_bribe
            # rejects zero as well — confirm the asymmetry is intended.
            for _, bribe in bribes.items():
                if bribe < 0:
                    raise BribeMustBeGreaterThanZeroException()
        self.briber = briber
        self._bribes: Dict[int, float] = bribes or {}
        # NOTE(review): appears redundant; BriberyAction.__init__ already records the time step.
        self.__time = self.briber.get_graph().get_time_step()

    @classmethod
    def empty_action(cls, briber):
        """Construct a single-briber action containing no bribes."""
        return cls(briber, None)

    def add_bribe(self, node_id: int, bribe: float):
        """
        Register one bribe, combining with any existing bribe on the same node.
        :param node_id: the customer to bribe
        :param bribe: the (non-negative) amount
        :raises BribeMustBeGreaterThanZeroException: if bribe is negative
        :raises NodeDoesNotExistException: if node_id is not a customer of the graph
        """
        if bribe < 0:
            raise BribeMustBeGreaterThanZeroException()
        if node_id not in self.briber.get_graph().get_customers():
            raise NodeDoesNotExistException()
        if node_id in self._bribes:
            print(f"WARNING: node {node_id} bribed twice in single time step, combining...", file=sys.stderr)
            self._bribes[node_id] += bribe
        else:
            self._bribes[node_id] = bribe

    def _perform_action(self):
        """
        Perform the stored bribery actions simultaneously.
        :raises BriberyActionExceedsAvailableUtilityException: if the total exceeds the briber's resources
        """
        if sum(self._bribes.values()) > self.briber.get_resources():
            raise BriberyActionExceedsAvailableUtilityException()
        for customer, bribe in self._bribes.items():
            self.briber.bribe(node_id=customer, amount=bribe)

    def is_bribed(self, node_id):
        """
        Determine if this action bribes a node.
        :param node_id: the node
        :return: (whether bribed, list holding this briber's id if bribed, else empty)
        """
        if node_id in self._bribes:
            return True, [self.briber.get_briber_id()]
        return False, []

    def get_bribes(self):
        """Return the mapping node_id -> bribe amount."""
        return self._bribes
/src/BribeNet/bribery/temporal/briber.py
from abc import ABC, abstractmethod

from BribeNet.bribery.briber import Briber, BriberyGraphNotSetException
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.helpers.bribeNetException import BribeNetException


class GraphNotSubclassOfTemporalRatingGraphException(BribeNetException):
    """Raised when a TemporalBriber is attached to a non-temporal rating graph."""
    pass


class TemporalBriber(Briber, ABC):
    """Abstract briber that acts over discrete time steps on a temporal rating graph."""

    def __init__(self, u0: float):
        """
        :param u0: initial utility (budget)
        """
        super().__init__(u0=u0)

    def _set_graph(self, g):
        # Temporal bribers require a temporal graph (one that exposes time steps).
        from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph
        if not issubclass(g.__class__, TemporalRatingGraph):
            raise GraphNotSubclassOfTemporalRatingGraphException(f"{g.__class__.__name__} is not a subclass of "
                                                                 "TemporalRatingGraph")
        super()._set_graph(g)

    def next_action(self) -> SingleBriberyAction:
        """
        Get the briber's next action, guarding against an unset graph.
        :raises BriberyGraphNotSetException: if no graph has been set
        :return: the SingleBriberyAction to perform at the next time step
        """
        if self.get_graph() is None:
            raise BriberyGraphNotSetException()
        return self._next_action()

    @abstractmethod
    def _next_action(self) -> SingleBriberyAction:
        """
        Defines the temporal model behaviour
        """
        raise NotImplementedError
/src/BribeNet/bribery/temporal/mostInfluentialNodeBriber.py
import sys

import numpy as np

from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import TemporalBriber


class MostInfluentialNodeBriber(TemporalBriber):
    """Temporal briber that probes nodes with small information-gathering bribes,
    then fully bribes the node whose probe produced the largest rating gain."""

    def __init__(self, u0: float, k: float = 0.1, i: int = 7):
        """
        Constructor
        :param u0: initial utility
        :param k: cost of information
        :param i: maximum loop iterations for finding most influential node
        """
        super().__init__(u0)
        self._k = k
        self._c = 0  # current loop iteration
        self._i = i  # maximum loop iterations for finding most influential node
        self._current_rating = None    # graph evaluation at the current step
        self._previous_rating = None   # graph evaluation at the previous step
        self._max_rating_increase = 0  # best rating gain observed from a probe so far
        self._best_node = None         # node whose probe produced the best gain
        self._next_node = 0            # node to probe this step
        self._last_node = 0            # node probed in the previous step
        self._info_gained = set()      # nodes already probed in the current search round
        self._bribed = set()           # nodes already fully bribed

    def _set_graph(self, g):
        super()._set_graph(g)
        # Make sure that k is set such that there are enough resources left to actually bribe people.
        self._k = min(self._k, 0.5 * (self.get_resources() / self.get_graph().customer_count()))

    def _bribe_to_max(self):
        # Amount needed to lift the current probe target to the maximum rating.
        bribe_to_max = self.get_graph().get_max_rating() - \
                       self.get_graph().get_vote(self._next_node)[self.get_briber_id()]
        if np.isnan(bribe_to_max):
            # Target has not voted yet; assume the full rating range is needed.
            bribe_to_max = 1.0
        return bribe_to_max

    def _next_action(self) -> SingleBriberyAction:
        """
        Next action of briber, either to gain information or to fully bribe the most influential node
        :return: SingleBriberyAction for the briber to take in the next temporal time step
        """
        self._current_rating = self.get_graph().eval_graph(self.get_briber_id())
        if self._previous_rating is None:
            self._previous_rating = self._current_rating
        next_act = SingleBriberyAction(self)
        try:
            # Pick a node not yet probed or bribed this round.
            self._next_node = self.get_graph().get_random_customer(excluding=self._info_gained | self._bribed)
        except IndexError:
            # Every customer has been probed or bribed: nothing left to try.
            print(f"WARNING: {self.__class__.__name__} found no influential nodes, not acting...", file=sys.stderr)
            return next_act
        if self._current_rating - self._previous_rating > self._max_rating_increase:
            # The previous probe produced the best rating gain seen so far.
            self._best_node = self._last_node
            self._max_rating_increase = self._current_rating - self._previous_rating
        maximum_bribe = min(self.get_resources(), self._bribe_to_max())
        if self._c >= self._i and self._best_node is not None and maximum_bribe > 0:
            # Search budget exhausted and a winner exists: pay the full bribe and reset the search.
            next_act.add_bribe(self._best_node, maximum_bribe)
            self._bribed.add(self._best_node)
            self._info_gained = set()
            self._c = 0
            self._max_rating_increase = 0
            # NOTE(review): reset to 0 rather than None (as in __init__) — confirm intended.
            self._best_node = 0
        else:
            if self._c >= self._i:
                print(f"WARNING: {self.__class__.__name__} has not found an influential node in {self._c} tries "
                      f"(intended maximum tries {self._i}), continuing search...", file=sys.stderr)
            # Bid an information gaining bribe, which is at most k, but is
            # smaller if you need to bribe less to get to the full bribe
            # or don't have enough money to bid k.
            next_act.add_bribe(self._next_node, min(self._bribe_to_max(), min(self.get_resources(), self._k)))
            self._info_gained.add(self._next_node)
            self._c = self._c + 1
        self._last_node = self._next_node
        self._previous_rating = self._current_rating
        return next_act
/src/BribeNet/bribery/temporal/nonBriber.py
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import TemporalBriber


class NonBriber(TemporalBriber):
    """Temporal briber that never bribes: a do-nothing control strategy."""

    def _next_action(self) -> SingleBriberyAction:
        """Return an action containing no bribes."""
        empty = SingleBriberyAction(self)
        return empty
/src/BribeNet/bribery/temporal/oneMoveEvenBriber.py
import random

import numpy as np

from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import TemporalBriber


class OneMoveEvenBriber(TemporalBriber):
    """Temporal briber that each step bribes one random even-numbered customer."""

    def _next_action(self) -> SingleBriberyAction:
        """
        Build a single-bribe action for a random even-id customer, lifting them to
        the maximum rating capped by the resources currently available.
        :return: the SingleBriberyAction holding that one bribe
        """
        graph = self.get_graph()
        # pick random customer among those with an even id
        even_customers = [c for c in graph.get_customers() if c % 2 == 0]
        target = random.choice(even_customers)
        current = graph.get_vote(target)[self.get_briber_id()]
        ceiling = graph.get_max_rating()
        # NaN vote means the customer has not voted yet: the whole ceiling is needed.
        gap = ceiling if np.isnan(current) else ceiling - current
        amount = min(self.get_resources(), gap)
        return SingleBriberyAction(self, bribes={target: amount})
/src/BribeNet/bribery/temporal/pGreedyBriber.py
import sys

import numpy as np

from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import TemporalBriber

"""
IMPORTANT!
This briber cheats and uses the direct influential node information.
This is for testing whether trust is powerful enough to beat P-greedy bribery even when the briber has perfect
information.
"""


class PGreedyBriber(TemporalBriber):
    """Greedy temporal briber with perfect knowledge of node influence weights."""

    def __init__(self, u0: float):
        """
        Constructor
        :param u0: initial utility
        """
        super().__init__(u0)
        self._targets = []    # influential, not-yet-bribed nodes, most influential first
        self._index = 0       # position of the next target in self._targets
        self._bribed = set()  # nodes already fully bribed

    def _set_graph(self, g):
        # No extra setup needed beyond the base class.
        super()._set_graph(g)

    def _get_influential_nodes(self, g):
        """
        Refresh self._targets with the not-yet-bribed nodes whose influence weight
        is at least 1, ordered by decreasing weight.
        :param g: the rating graph to read influence weights from
        """
        # noinspection PyProtectedMember
        influence_weights = [(n, g._get_influence_weight(n, self.get_briber_id())) for n in self._g.get_customers()]
        influence_weights = sorted(influence_weights, key=lambda x: x[1], reverse=True)
        # Idiom fix: "n not in" rather than "not n in".
        self._targets = [n for (n, w) in influence_weights if w >= 1 and n not in self._bribed]

    def _next_action(self) -> SingleBriberyAction:
        """
        Next action of briber, just bribe the next node as fully as you can.
        :return: SingleBriberyAction for the briber to take in the next temporal time step
        """
        next_act = SingleBriberyAction(self)
        if self._index >= len(self._targets):
            # Exhausted the current target list: recompute the influential nodes.
            self._get_influential_nodes(self._g)
            self._index = 0
        if self._index < len(self._targets):
            # Bribe the next target as fully as you can.
            target = self._targets[self._index]
            target_vote = self._g.get_vote(target)[self.get_briber_id()]
            if np.isnan(target_vote):
                # Non-voter: treat the current vote as zero.
                target_vote = 0
            next_act.add_bribe(target, min(self.get_resources(), self._g.get_max_rating() - target_vote))
            self._index += 1
            self._bribed.add(target)
        else:
            print(f"WARNING: {self.__class__.__name__} found no influential nodes, not acting...", file=sys.stderr)
        return next_act
/src/BribeNet/bribery/temporal/randomBriber.py
import random

from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.briber import TemporalBriber

DELTA = 0.001  # ensures total bribes do not exceed budget


class RandomBriber(TemporalBriber):
    """Temporal briber that spreads its whole remaining budget randomly each step."""

    def _next_action(self) -> SingleBriberyAction:
        """
        Give every customer a random share of the available resources.

        BUG FIX: bribes were previously paired to customers positionally
        ({i: bribes[i]}), which breaks if customer ids are not exactly 0..n-1;
        customers and amounts are now paired with zip. An empty customer list
        is also handled (previously a ZeroDivisionError).
        :return: the SingleBriberyAction holding one bribe per customer
        """
        customers = self.get_graph().get_customers()
        if not customers:
            return SingleBriberyAction(self)  # nothing to bribe
        # array of random bribes, normalised so their total is (resources - DELTA)
        weights = [random.uniform(0.0, 1.0) for _ in customers]
        total = sum(weights)
        budget = max(0.0, self.get_resources() - DELTA)
        bribery_dict = {c: w * budget / total for c, w in zip(customers, weights)}
        return SingleBriberyAction(self, bribes=bribery_dict)
/src/BribeNet/graph/generation/__init__.py
import enum

from networkit.generators import WattsStrogatzGenerator, BarabasiAlbertGenerator

from BribeNet.graph.generation.algo.compositeGenerator import CompositeGenerator
from BribeNet.helpers.bribeNetException import BribeNetException


class GraphGenerationAlgoNotDefinedException(BribeNetException):
    """Raised when algo_to_constructor receives something that is not a known algorithm."""
    pass


@enum.unique
class GraphGeneratorAlgo(enum.Enum):
    """
    Enum of usable NetworKit graph generation algorithms
    """
    WATTS_STROGATZ = 0
    BARABASI_ALBERT = 1
    COMPOSITE = 2


def algo_to_constructor(g: GraphGeneratorAlgo):
    """
    Conversion method from an instance of the GraphGeneratorAlgo enum to a instantiable NetworKit generator class
    :param g: the algorithm
    :return: the relevant NetworKit generator class
    :raises GraphGenerationAlgoNotDefinedException: if g is not a member of the GraphGeneratorAlgo enum
    """
    if isinstance(g, GraphGeneratorAlgo):
        # Add more algorithms here if needed
        return {
            GraphGeneratorAlgo.WATTS_STROGATZ: WattsStrogatzGenerator,
            GraphGeneratorAlgo.BARABASI_ALBERT: BarabasiAlbertGenerator,
            GraphGeneratorAlgo.COMPOSITE: CompositeGenerator,
        }[g]
    raise GraphGenerationAlgoNotDefinedException(f"{g} is not a member of the GraphGeneratorAlgo enum")
/src/BribeNet/graph/generation/algo/compositeGenerator.py
from math import floor, log, ceil
from random import gauss, sample, random

import networkit as nk
# noinspection PyUnresolvedReferences
from networkit import Graph
from networkit.generators import BarabasiAlbertGenerator, WattsStrogatzGenerator


def _make_complete(n: int):
    """Build and return the complete (fully connected) graph on n nodes."""
    g_ = Graph(n)
    for i in g_.iterNodes():
        for j in g_.iterNodes():
            if i < j:
                g_.addEdge(i, j)
    return g_


class CompositeGenerator(object):
    """
    Pretend to extend inaccessible networkit._NetworKit.StaticGraphGenerator
    """

    def __init__(self, n: int, community_count: int, small_world_neighbours: int, rewiring: float, scale_free_k: int,
                 probability_reduce: float = 0.05):
        """
        :param n: total number of nodes in the generated graph
        :param community_count: number of small-world communities
        :param small_world_neighbours: requested neighbour count within each community
        :param rewiring: Watts-Strogatz rewiring probability
        :param scale_free_k: attachment parameter of the community-level scale-free graph
        :param probability_reduce: decay factor for inter-community edge probability
        """
        self._n = n
        self._community_count = community_count
        self._small_world_neighbours = small_world_neighbours
        self._rewiring = rewiring
        self._scale_free_k = scale_free_k
        self._probability_reduce = probability_reduce

    def generate(self):
        """
        Generate a composite graph: a scale-free network of communities, each
        community a small-world (or complete) graph, joined by inter-community
        edges added with decaying probability.

        NOTE(review): self._n is consumed during generation, so generate() is
        effectively single-use per instance — confirm intended.
        :return: the generated NetworKit Graph
        """
        # First, generate a scale free network, which acts as our community network.
        communities = BarabasiAlbertGenerator(self._scale_free_k, self._community_count, 4, True).generate()
        small_world_graphs = {}
        node_count = communities.numberOfNodes()
        community_size = self._n / self._community_count
        # Then generate a small world graph for each node with size decided
        # by a Gaussian distribution around the average node size.
        i = node_count - 1
        for node in communities.iterNodes():
            local_size = gauss(community_size, community_size / 3)
            # Choose local_n such that all communities have size at least two.
            local_n = max(min(round(local_size), self._n - (2 * i)), 2)
            # If it's the last iteration, we much "use up" the rest of the nodes.
            if i == 0:
                local_n = self._n
            # For a random graph to be connected, we require that 2k >> ln(n).
            # (2k because of how NetworKit defines k.)
            # => k < (n-1)/2
            connectivity = max(self._small_world_neighbours, floor(log(local_n)))
            # However, we also require that 2k < n-1, since otherwise you end
            # up with double links.
            connectivity = max(0, min(ceil((local_n - 1) / 2) - 1, connectivity))
            if local_n > 3:
                # Sometimes WattsStrogatzGenerators return unconnected graphs.
                # This is due to the fact that 2k >> ln(n) is vague, and also
                # bounded above by 2k < n-1.
                # Therefore, we repeat the process until a connected graph is
                # created. This shouldn't loop too many times.
                is_connected = False
                while not is_connected:
                    small_world_graphs[node] = WattsStrogatzGenerator(local_n, connectivity,
                                                                      self._rewiring).generate()
                    # noinspection PyUnresolvedReferences
                    connected_components = nk.components.ConnectedComponents(small_world_graphs[node]).run()
                    is_connected = connected_components.numberOfComponents() == 1
            else:
                small_world_graphs[node] = _make_complete(local_n)
            self._n -= local_n
            i -= 1
        # Build a merged graph.
        big_graph = Graph(0, False, False)
        ranges = [0]
        partition = []
        neighbours = [list(communities.neighbors(node)) for node in communities.iterNodes()]
        # To avoid neighbour sets having edges going both ways, delete references to nodes larger than themselves.
        for n in range(len(neighbours)):
            neighbours[n] = list(filter(lambda x: x < n, neighbours[n]))
        for graph in small_world_graphs.values():
            # noinspection PyUnresolvedReferences
            nk.graphtools.append(big_graph, graph)
            ranges.append(big_graph.numberOfNodes())
            partition.append(list(range(ranges[-2], ranges[-1])))
        # Finally, connect these small world graphs where their parent nodes are connected.
        for i in range(len(neighbours)):
            for j in neighbours[i]:
                # Connect partitions i and j
                n1 = partition[i]
                n2 = partition[j]
                p = 1.0
                for nc1 in sample(n1, len(n1)):
                    for nc2 in sample(n2, len(n2)):
                        # Connect with probability p
                        if random() <= p:
                            big_graph.addEdge(nc1, nc2)
                        # NOTE(review): p decays after every candidate pair, not only
                        # after an edge is added — confirm intended.
                        p = p * self._probability_reduce
        return big_graph


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from networkit.viztasks import drawGraph

    g = CompositeGenerator(4000, 15, 50, 0.1, 2).generate()
    drawGraph(g)
    plt.show()
/src/BribeNet/graph/generation/flatWeightGenerator.py
import networkit as nk
import networkit.nxadapter as adap

from BribeNet.graph.generation import GraphGeneratorAlgo
from BribeNet.graph.generation.weightedGenerator import WeightedGraphGenerator


class FlatWeightedGraphGenerator(WeightedGraphGenerator):
    """Generator wrapper that assigns a uniform weight of 1.0 to every edge."""

    def __init__(self, a: GraphGeneratorAlgo, *args, **kwargs):
        super().__init__(a, *args, **kwargs)

    # Networkit does not let you add weights to a previously unweighted graph.
    # Thus we convert it to a Networkx graph, add weights and then revert.
    def generate(self) -> nk.graph:
        """Generate the underlying graph and return it with all edge weights set to 1.0."""
        nx_graph = adap.nk2nx(self._generator.generate())
        for source, target in nx_graph.edges():
            nx_graph[source][target]['weight'] = 1.0
        return adap.nx2nk(nx_graph, 'weight')
/src/BribeNet/graph/generation/generator.py
import abc # noinspection PyUnresolvedReferences from networkit import Graph from BribeNet.graph.generation import GraphGeneratorAlgo, algo_to_constructor class GraphGenerator(abc.ABC): def __init__(self, a: GraphGeneratorAlgo, *args, **kwargs): """ Thin wrapper class for NetworKit graph generation algorithms :param a: the GraphGenerationAlgo to use :param args: any arguments to this generator :param kwargs: any keyword arguments to this generator """ self._algo = a self._args = args self._kwargs = kwargs self._generator = algo_to_constructor(self._algo)(*args, **kwargs) @abc.abstractmethod def generate(self) -> Graph: """ Call generate on the generator defined by this class and perform any additional actions :return: a NetworKit Graph """ raise NotImplementedError
/src/BribeNet/graph/generation/unweightedGenerator.py
from BribeNet.graph.generation import GraphGeneratorAlgo
from BribeNet.graph.generation.generator import GraphGenerator


class UnweightedGraphGenerator(GraphGenerator):
    """Generator wrapper that returns the NetworKit graph as-is, without edge weights."""

    def __init__(self, a: GraphGeneratorAlgo, *args, **kwargs):
        """
        :param a: the GraphGenerationAlgo to use
        :param args: any arguments to this generator
        :param kwargs: any keyword arguments to this generator
        """
        super().__init__(a, *args, **kwargs)

    def generate(self):
        """Generate and return the (unweighted) graph from the wrapped generator."""
        return self._generator.generate()
/src/BribeNet/graph/generation/weightedGenerator.py
import abc

from BribeNet.graph.generation import GraphGeneratorAlgo
from BribeNet.graph.generation.generator import GraphGenerator


class WeightedGraphGenerator(GraphGenerator, abc.ABC):
    """Abstract base for generators that add edge weights to the generated graph."""

    def __init__(self, a: GraphGeneratorAlgo, *args, **kwargs):
        """
        Thin wrapper class for NetworKit graph generation algorithms which add weights to edges
        :param a: the GraphGenerationAlgo to use
        :param args: any arguments to this generator
        :param kwargs: any keyword arguments to this generator
        """
        super().__init__(a, *args, **kwargs)
/src/BribeNet/graph/ratingGraph.py
import random from abc import ABC from copy import deepcopy from typing import Tuple, Optional, List, Any, Set import networkit as nk import numpy as np from weightedstats import weighted_mean, weighted_median, mean, median from BribeNet.graph.generation import GraphGeneratorAlgo from BribeNet.graph.generation.flatWeightGenerator import FlatWeightedGraphGenerator from BribeNet.graph.generation.generator import GraphGenerator from BribeNet.graph.ratingMethod import RatingMethod from BribeNet.helpers.bribeNetException import BribeNetException DEFAULT_GEN = FlatWeightedGraphGenerator(GraphGeneratorAlgo.WATTS_STROGATZ, 30, 5, 0.3) MAX_RATING = 1.0 MAX_DIFF = 0.6 class BribersAreNotTupleException(BribeNetException): pass class NoBriberGivenException(BribeNetException): pass class BriberNotSubclassOfBriberException(BribeNetException): pass class VotesNotInstantiatedBySpecificsException(BribeNetException): pass class TruthsNotInstantiatedBySpecificsException(BribeNetException): pass class GammaNotSetException(BribeNetException): pass class RatingGraph(ABC): """ Representation of network graph which bribers interact with """ def __init__(self, bribers: Tuple[Any], generator: GraphGenerator = DEFAULT_GEN, specifics=None, **kwargs): """ Abstract class for rating graphs :param bribers: the bribing actors on the graph :param generator: the graph generator used to instantiate the graph :param specifics: function in implementing class to call after the superclass initialisation, but prior to _finalise_init (template design pattern) :param **kwargs: additional keyword arguments to the graph, such as max_rating """ # Generate random ratings network self._g = generator.generate() from BribeNet.bribery.briber import Briber if issubclass(bribers.__class__, Briber): bribers = tuple([bribers]) if not isinstance(bribers, tuple): raise BribersAreNotTupleException() if not bribers: raise NoBriberGivenException() for b in bribers: if not issubclass(b.__class__, Briber): raise 
BriberNotSubclassOfBriberException(f"{b.__class__.__name__} is not a subclass of Briber") self._bribers = bribers self._max_rating: float = MAX_RATING self._votes: np.ndarray[Optional[float]] = None self._truths: np.ndarray[float] = None self._rating_method: RatingMethod = RatingMethod.P_RATING self._gamma: Optional[float] = None if specifics is not None: specifics() self._finalise_init() def _finalise_init(self): """ Perform assertions that ensure everything is initialised """ if not isinstance(self._bribers, tuple): raise BribersAreNotTupleException("specifics of implementing class did not instantiate self._bribers " "as a tuple") from BribeNet.bribery.briber import Briber for briber in self._bribers: if not issubclass(briber.__class__, Briber): raise BriberNotSubclassOfBriberException(f"{briber.__class__.__name__} is not a subclass of Briber") # noinspection PyProtectedMember briber._set_graph(self) if not isinstance(self._votes, np.ndarray): raise VotesNotInstantiatedBySpecificsException() if not isinstance(self._truths, np.ndarray): raise TruthsNotInstantiatedBySpecificsException() def get_bribers(self) -> Tuple[Any]: """ Get the bribers active on the graph :return: the bribers """ return self._bribers def get_max_rating(self) -> float: """ Get the maximum rating :return: the maximum rating """ return self._max_rating def set_rating_method(self, rating_method: RatingMethod): """ Set the rating method being used :param rating_method: the rating method to use """ self._rating_method = rating_method def set_gamma(self, gamma: float): """ Set gamma which is used as the dampening factor in P-gamma-rating :param gamma: the dampening factor in P-gamma-rating """ self._gamma = gamma def get_rating(self, node_id: int = 0, briber_id: int = 0, rating_method: Optional[RatingMethod] = None, nan_default: Optional[int] = None): """ Get the rating for a certain node and briber, according to the set rating method :param node_id: the node to find the rating of (can be omitted 
for O-rating) :param briber_id: the briber to find the rating of (can be omitted in single-briber rating graphs) :param rating_method: a rating method to override the current set rating method if not None :param nan_default: optional default integer value to replace np.nan as default return :return: the rating """ rating_method_used = rating_method or self._rating_method rating = np.nan if rating_method_used == RatingMethod.O_RATING: rating = self._o_rating(briber_id) elif rating_method_used == RatingMethod.P_RATING: rating = self._p_rating(node_id, briber_id) elif rating_method_used == RatingMethod.MEDIAN_P_RATING: rating = self._median_p_rating(node_id, briber_id) elif rating_method_used == RatingMethod.SAMPLE_P_RATING: rating = self._sample_p_rating(node_id, briber_id) elif rating_method_used == RatingMethod.WEIGHTED_P_RATING: rating = self._p_rating_weighted(node_id, briber_id) elif rating_method_used == RatingMethod.WEIGHTED_MEDIAN_P_RATING: rating = self._median_p_rating_weighted(node_id, briber_id) elif rating_method_used == RatingMethod.P_GAMMA_RATING: if self._gamma is None: raise GammaNotSetException() rating = self._p_gamma_rating(node_id, briber_id, self._gamma) if np.isnan(rating) and nan_default is not None: rating = nan_default return rating def get_graph(self): """ Return the NetworKit graph of the network Ensure this information isn't used by a briber to "cheat" :return: the graph """ return self._g def _neighbours(self, node_id: int, briber_id: int = 0) -> List[int]: """ Get the voting neighbours of a node :param node_id: the node to get neighbours of :param briber_id: the briber on which voting has been done :return: the voting neighbours of the node for the briber """ return [n for n in self.get_graph().neighbors(node_id) if not np.isnan(self._votes[n][briber_id])] def get_customers(self) -> List[int]: """ Get the customer ids without knowledge of edges or ratings :return: the customer ids in the graph """ return 
list(self.get_graph().iterNodes()) def customer_count(self) -> int: """ Get the number of customers :return: the number of nodes in the graph """ return self.get_graph().numberOfNodes() def get_random_customer(self, excluding: Optional[Set[int]] = None) -> int: """ Gets the id of a random customer :param excluding: set of customer ids not to be returned :return: random node id in the graph """ if excluding is None: excluding = set() return random.choice(tuple(set(self.get_graph().iterNodes()) - excluding)) def get_vote(self, idx: int): """ Returns the vote of a voter in the current network state :param idx: the id of the voter :return: np.nan if non-voter, otherwise float if single briber, np.ndarray of floats if multiple bribers """ return self._votes[idx] def _p_rating(self, node_id: int, briber_id: int = 0): """ Get the P-rating for the node :param node_id: the id of the node :param briber_id: the id number of the briber :return: mean of actual rating of neighbouring voters """ ns = self._neighbours(node_id, briber_id) if len(ns) == 0: return np.nan return mean([self.get_vote(n)[briber_id] for n in ns]) def _p_rating_weighted(self, node_id: int, briber_id: int = 0): """ Get the P-rating for the node, weighted based on trust :param node_id: the id of the node :param briber_id: the id number of the briber :return: mean of actual rating of neighbouring voters """ ns = self._neighbours(node_id, briber_id) if len(ns) == 0: return np.nan weights = [self.get_weight(n, node_id) for n in ns] votes = [self.get_vote(n)[briber_id] for n in ns] return weighted_mean(votes, weights) def _median_p_rating(self, node_id: int, briber_id: int = 0): """ Get the median-based P-rating for the node :param node_id: the id of the node :param briber_id: the id number of the briber :return: median of actual rating of neighbouring voters """ ns = self._neighbours(node_id, briber_id) if len(ns) == 0: return np.nan return median([self.get_vote(n)[briber_id] for n in ns]) def 
_median_p_rating_weighted(self, node_id: int, briber_id: int = 0): """ Get the median-based P-rating for the node, weighted based on trust :param node_id: the id of the node :param briber_id: the id number of the briber :return: median of actual rating of neighbouring voters """ ns = self._neighbours(node_id, briber_id) if len(ns) == 0: return np.nan weights = [self.get_weight(n, node_id) for n in ns] votes = [self.get_vote(n)[briber_id] for n in ns] return weighted_median(votes, weights) def _sample_p_rating(self, node_id: int, briber_id: int = 0): """ Get the sample-based P-rating for the node :param node_id: the id of the node :param briber_id: the id number of the briber :return: mean of a sample of actual rating of neighbouring voters """ ns = self._neighbours(node_id, briber_id) if len(ns) == 0: return np.nan sub = random.sample(ns, random.randint(1, len(ns))) return mean([self.get_vote(n)[briber_id] for n in sub]) def _o_rating(self, briber_id: int = 0): """ Get the O-rating for the node :param briber_id: the id number of the briber :return: mean of all actual ratings """ ns = [n for n in self.get_graph().iterNodes() if not np.isnan(self._votes[n][briber_id])] if len(ns) == 0: return np.nan return mean([self.get_vote(n)[briber_id] for n in ns]) def _p_gamma_rating(self, node_id: int, briber_id: int = 0, gamma: float = 0.05): """ Get the P-gamma-rating for the node, which weights nodes based on the gamma factor: The gamma factor is defined as gamma^(D(n,c) - 1), where n is our starting node, c is the node we are considering and D(n,c) is the shortest distance. 
:param briber_id: the id number of the briber :return: weighted mean of all actual ratings based on the gamma factor """ ns = [n for n in self._g.iterNodes() if (not np.isnan(self._votes[n][briber_id])) and n != node_id] # noinspection PyUnresolvedReferences unweighted_g = nk.graphtools.toUnweighted(self.get_graph()) # noinspection PyUnresolvedReferences bfs_run = nk.distance.BFS(unweighted_g, node_id).run() distances = bfs_run.getDistances() weights = [gamma ** (distances[n] - 1) for n in ns] votes = [self.get_vote(n)[briber_id] for n in ns] return weighted_mean(votes, weights) def is_influential(self, node_id: int, k: float = 0.1, briber_id: int = 0, rating_method: Optional[RatingMethod] = None, charge_briber: bool = True) -> float: """ Determines if a node is influential using a small bribe :param node_id: the id of the node :param k: the cost of information :param briber_id: the briber for which the node may be influential :param rating_method: a rating method to override the current set rating method if not None :param charge_briber: whether this query is being made by a briber who must be charged and the ratings adjusted :return: float > 0 if influential, 0 otherwise """ prev_p = self.eval_graph(briber_id, rating_method) vote = self.get_vote(node_id)[briber_id] if (not np.isnan(vote)) and (vote < 1 - k): if charge_briber: # bribe via the briber in order to charge their utility self._bribers[briber_id].bribe(node_id, k) reward = self.eval_graph(briber_id, rating_method) - prev_p - k else: # "bribe" directly on the graph, not charging the briber and not affecting ratings g_ = deepcopy(self) g_.bribe(node_id, k, briber_id) reward = g_.eval_graph(briber_id, rating_method) - prev_p - k if reward > 0: return reward return 0.0 def _get_influence_weight(self, node_id: int, briber_id: Optional[int] = 0): """ Get the influence weight of a node in the graph, as defined by Grandi and Turrini. 
:param node_id: the node to fetch the influence weight of :param briber_id: the briber (determines which neighbours have voted) :return: the influence weight of the node """ neighbourhood_sizes = [len(self._neighbours(n, briber_id)) for n in self._neighbours(node_id, briber_id)] neighbour_weights = [1.0 / n for n in neighbourhood_sizes if n > 0] # discard size 0 neighbourhoods return sum(neighbour_weights) def bribe(self, node_id, b, briber_id=0): """ Increase the rating of a node by an amount, capped at the max rating :param node_id: the node to bribe :param b: the amount to bribe the node :param briber_id: the briber who's performing the briber """ if not np.isnan(self._votes[node_id][briber_id]): self._votes[node_id][briber_id] = min(self._max_rating, self._votes[node_id][briber_id] + b) else: self._votes[node_id][briber_id] = min(self._max_rating, b) def eval_graph(self, briber_id=0, rating_method=None): """ Metric to determine overall rating of the graph :param rating_method: a rating method to override the current set rating method if not None :param briber_id: the briber being considered in the evaluation :return: the sum of the rating across the network """ return sum(self.get_rating(node_id=n, briber_id=briber_id, rating_method=rating_method, nan_default=0) for n in self.get_graph().iterNodes()) def average_rating(self, briber_id=0, rating_method=None): voting_customers = [c for c in self.get_graph().iterNodes() if not np.isnan(self.get_vote(c))[briber_id]] return self.eval_graph(briber_id, rating_method) / len(voting_customers) def set_weight(self, node1_id: int, node2_id: int, weight: float): """ Sets a weight for a given edge, thus allowing for trust metrics to affect graph structure. 
:param node1_id: the first node of the edge :param node2_id: the second node of the edge :param weight: the weight of the edge to set """ self.get_graph().setWeight(node1_id, node2_id, weight) def get_weight(self, node1_id: int, node2_id: int) -> float: """ Gets the weight of a given edge. :param node1_id: the first node of the edge :param node2_id: the second node of the edge """ return self.get_graph().weight(node1_id, node2_id) def get_edges(self) -> [(int, int)]: return list(self.get_graph().iterEdges()) def trust(self, node1_id: int, node2_id: int) -> float: """ Determines the trust of a given edge, which is a value from 0 to 1. This uses the average of the difference in vote between each pair of places. :param node1_id: the first node of the edge :param node2_id: the second node of the edge """ votes1 = self.get_vote(node1_id) votes2 = self.get_vote(node2_id) differences = votes1 - votes2 nans = np.isnan(differences) differences[nans] = 0 differences = np.square(differences) trust = 1 - (np.sum(differences) / (len(differences) * MAX_DIFF ** 2)) return max(0, min(1, trust)) def average_trust(self): """ Average trust value for all pairs of nodes """ trusts = [self.get_weight(a, b) for (a, b) in self.get_graph().iterEdges()] return np.mean(trusts) def __copy__(self): """ copy operation. :return: A shallow copy of the instance """ cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result def __deepcopy__(self, memo=None): """ deepcopy operation. :param memo: the memo dictionary :return: A deep copy of the instance """ if memo is None: memo = {} cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): # noinspection PyArgumentList setattr(result, k, deepcopy(v, memo)) return result
/src/BribeNet/graph/ratingMethod.py
import enum @enum.unique class RatingMethod(enum.Enum): O_RATING = 0 P_RATING = 1 MEDIAN_P_RATING = 2 SAMPLE_P_RATING = 3 P_GAMMA_RATING = 4 WEIGHTED_P_RATING = 5 WEIGHTED_MEDIAN_P_RATING = 6
/src/BribeNet/graph/static/ratingGraph.py
import random
from typing import Tuple, Union, Any

import numpy as np

from BribeNet.graph.ratingGraph import RatingGraph, DEFAULT_GEN, BribersAreNotTupleException, NoBriberGivenException
from BribeNet.helpers.bribeNetException import BribeNetException
from BribeNet.helpers.override import override

# proportion of customers that do not vote, unless overridden via kwargs
DEFAULT_NON_VOTER_PROPORTION = 0.2


class BriberNotSubclassOfStaticBriberException(BribeNetException):
    # raised when a briber passed to StaticRatingGraph is not a StaticBriber
    pass


class StaticRatingGraph(RatingGraph):

    def __init__(self, bribers: Union[Tuple[Any], Any], generator=DEFAULT_GEN, **kwargs):
        """
        :param bribers: a single StaticBriber or a tuple of StaticBribers active on the network
        :param generator: the generator used to build the underlying customer graph
        :param kwargs: forwarded to RatingGraph (may include non_voter_proportion)
        :raises BribersAreNotTupleException: if bribers is neither a StaticBriber nor a tuple
        :raises NoBriberGivenException: if the tuple of bribers is empty
        :raises BriberNotSubclassOfStaticBriberException: if any member is not a StaticBriber
        """
        # local import to avoid a cyclic dependency
        from BribeNet.bribery.static.briber import StaticBriber
        if issubclass(bribers.__class__, StaticBriber):
            # allow a bare briber as a convenience; normalise to a tuple
            bribers = tuple([bribers])
        if not isinstance(bribers, tuple):
            raise BribersAreNotTupleException()
        if not bribers:
            raise NoBriberGivenException()
        for b in bribers:
            if not issubclass(b.__class__, StaticBriber):
                raise BriberNotSubclassOfStaticBriberException(f"{b.__class__.__name__} is not a subclass of "
                                                               "StaticBriber")
        # stash constructor arguments so the __specifics callback (invoked from
        # RatingGraph.__init__ once the graph exists) can finish initialisation
        self.__tmp_bribers = bribers
        self.__tmp_kwargs = kwargs
        super().__init__(bribers, generator=generator, specifics=self.__specifics, **kwargs)

    @override
    def _finalise_init(self):
        """
        Perform assertions that ensure everything is initialised
        """
        from BribeNet.bribery.static.briber import StaticBriber
        for briber in self._bribers:
            if not issubclass(briber.__class__, StaticBriber):
                raise BriberNotSubclassOfStaticBriberException(f"{briber.__class__.__name__} is not a subclass of "
                                                               "StaticBriber")
        super()._finalise_init()

    def __specifics(self):
        # deferred initialisation, called back from RatingGraph.__init__ after the
        # customer graph has been generated
        from BribeNet.bribery.static.briber import StaticBriber
        self._bribers: Tuple[StaticBriber] = self.__tmp_bribers
        # noinspection PyTypeChecker
        self._votes = np.zeros((self.get_graph().numberOfNodes(), len(self._bribers)))
        self._truths = np.zeros((self.get_graph().numberOfNodes(), len(self._bribers)))
        # Generate random ratings network
        if "non_voter_proportion" in self.__tmp_kwargs:
            non_voter_proportion = self.__tmp_kwargs["non_voter_proportion"]
        else:
            non_voter_proportion = DEFAULT_NON_VOTER_PROPORTION
        for n in self.get_graph().iterNodes():
            for b, _ in enumerate(self._bribers):
                # each customer holds a uniform-random true opinion of each briber...
                rating = random.uniform(0, self._max_rating)
                self._truths[n][b] = rating
                # ...and publishes it as a vote unless chosen as a non-voter
                if random.random() > non_voter_proportion:
                    self._votes[n][b] = rating
                else:
                    self._votes[n][b] = np.nan
        del self.__tmp_bribers, self.__tmp_kwargs
/src/BribeNet/graph/static/ratingGraphBuilder.py
import enum import sys from typing import List from BribeNet.bribery.briber import Briber from BribeNet.bribery.static.influentialNodeBriber import InfluentialNodeBriber from BribeNet.bribery.static.mostInfluentialNodeBriber import MostInfluentialNodeBriber from BribeNet.bribery.static.nonBriber import NonBriber from BribeNet.bribery.static.oneMoveInfluentialNodeBriber import OneMoveInfluentialNodeBriber from BribeNet.bribery.static.oneMoveRandomBriber import OneMoveRandomBriber from BribeNet.bribery.static.randomBriber import RandomBriber from BribeNet.graph.ratingGraph import DEFAULT_GEN from BribeNet.graph.static.ratingGraph import StaticRatingGraph @enum.unique class BriberType(enum.Enum): Non = 0 Random = 1 OneMoveRandom = 2 InfluentialNode = 3 MostInfluentialNode = 4 OneMoveInfluentialNode = 5 @classmethod def get_briber_constructor(cls, idx, *args, **kwargs): c = None if idx == cls.Non: c = NonBriber if idx == cls.Random: c = RandomBriber if idx == cls.OneMoveRandom: c = OneMoveRandomBriber if idx == cls.InfluentialNode: c = InfluentialNodeBriber if idx == cls.MostInfluentialNode: c = MostInfluentialNodeBriber if idx == cls.OneMoveInfluentialNode: c = OneMoveInfluentialNodeBriber return lambda u0: c(u0, *args, **kwargs) class RatingGraphBuilder(object): def __init__(self): self.bribers: List[Briber] = [] self.generator = DEFAULT_GEN def add_briber(self, briber: BriberType, u0: int = 0, *args, **kwargs): self.bribers.append(BriberType.get_briber_constructor(briber, *args, **kwargs)(u0)) return self def set_generator(self, generator): self.generator = generator return self def build(self) -> StaticRatingGraph: if not self.bribers: print("WARNING: StaticRatingGraph built with no bribers. Using NonBriber...", file=sys.stderr) return StaticRatingGraph(tuple([NonBriber(0)])) return StaticRatingGraph(tuple(self.bribers))
/src/BribeNet/graph/temporal/action/actionType.py
import enum @enum.unique class ActionType(enum.Enum): NONE = 0 BRIBED = 1 SELECT = 2
/src/BribeNet/graph/temporal/action/customerAction.py
from typing import Dict, Any, Tuple, List

import numpy as np

from BribeNet.bribery.temporal.action.briberyAction import BriberyAction
from BribeNet.bribery.temporal.briber import GraphNotSubclassOfTemporalRatingGraphException
from BribeNet.graph.temporal.action.actionType import ActionType
from BribeNet.helpers.bribeNetException import BribeNetException


class CustomerActionExecutedMultipleTimesException(BribeNetException):
    # raised when perform_action is invoked more than once on the same action
    pass


class CustomerActionTimeNotCorrectException(BribeNetException):
    # raised when perform_action is invoked at a graph time step other than the
    # one the action was created for
    pass


class CustomerAction(object):
    """
    Records, per customer, what happened in one customer round
    (nothing / bribed by whom / selected which briber) and applies it to the graph.
    """

    def __init__(self, graph):
        """
        :param graph: the TemporalRatingGraph this action applies to
        :raises GraphNotSubclassOfTemporalRatingGraphException: if graph is not a TemporalRatingGraph
        """
        from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph  # local import to remove cyclic dependency
        if not issubclass(graph.__class__, TemporalRatingGraph):
            raise GraphNotSubclassOfTemporalRatingGraphException(f"{graph.__class__.__name__} is not a subclass of "
                                                                 "TemporalRatingGraph")
        self.graph = graph
        # customer id -> (ActionType, payload); payload is a list of briber ids for
        # BRIBED and a single briber id for SELECT
        self.actions: Dict[int, Tuple[ActionType, Any]] = {c: (ActionType.NONE, None)
                                                           for c in self.graph.get_customers()}
        # snapshot the graph time step so execution can be validated later
        self.__time_step = self.graph.get_time_step()
        self.__performed = False

    @classmethod
    def empty_action(cls, graph):
        """Construct an action in which every customer does nothing."""
        return cls(graph)

    def get_time_step(self):
        # the graph time step this action was created for
        return self.__time_step

    def get_performed(self):
        # whether perform_action has already been executed
        return self.__performed

    def get_action_type(self, node_id: int):
        """
        :param node_id: the customer to query
        :return: the ActionType currently recorded for that customer
        """
        return self.actions[node_id][0]

    def set_bribed(self, node_id: int, briber_ids: List[int]):
        # record that the customer was bribed this round by the given bribers
        self.actions[node_id] = (ActionType.BRIBED, briber_ids)

    def set_none(self, node_id: int):
        # reset the customer to "no action"
        # NOTE(review): payload is 0 here but None in __init__ — appears unused; confirm
        self.actions[node_id] = (ActionType.NONE, 0)

    def set_select(self, node_id: int, briber_id):
        # record that the customer selected (visited) the given briber
        self.actions[node_id] = (ActionType.SELECT, briber_id)

    def set_bribed_from_bribery_action(self, bribery_action: BriberyAction):
        """Mark as BRIBED every customer that the given bribery action bribed."""
        for c in self.actions:
            bribed, bribers = bribery_action.is_bribed(c)
            if bribed:
                self.set_bribed(c, bribers)

    # noinspection PyProtectedMember
    def perform_action(self, pay: float):
        """
        Perform the described action on the graph

        :param pay: the amount to increase a selected briber's utility
        :raises CustomerActionTimeNotCorrectException: if the graph has moved to a different time step
        :raises CustomerActionExecutedMultipleTimesException: if the action was already performed
        """
        if not self.__performed:
            if self.__time_step == self.graph.get_time_step():
                for c in self.actions:
                    if self.actions[c][0] == ActionType.SELECT:
                        selected = self.actions[c][1]
                        if np.isnan(self.graph._votes[c][selected]):  # no previous vote or bribe
                            # a first visit makes the customer publish their true opinion
                            self.graph._votes[c][selected] = self.graph._truths[c][selected]
                        # the visited briber earns the pay regardless of voting state
                        self.graph._bribers[selected].add_resources(pay)
                self.__performed = True
            else:
                message = f"The time step of the TemporalRatingGraph ({self.graph.get_time_step()}) is not equal to " \
                          f"the intended execution time ({self.__time_step})"
                raise CustomerActionTimeNotCorrectException(message)
        else:
            raise CustomerActionExecutedMultipleTimesException()
/src/BribeNet/graph/temporal/noCustomerActionGraph.py
from BribeNet.graph.ratingGraph import DEFAULT_GEN from BribeNet.graph.temporal.action.customerAction import CustomerAction from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph class NoCustomerActionGraph(TemporalRatingGraph): """ A temporal rating graph solely for testing purposes. """ def __init__(self, bribers, generator=DEFAULT_GEN, **kwargs): super().__init__(bribers, generator=generator, **kwargs) def _customer_action(self): return CustomerAction(self)
/src/BribeNet/graph/temporal/ratingGraph.py
import abc
import random
from sys import maxsize
from typing import Tuple, Union, Any, Optional, List

import numpy as np

from BribeNet.bribery.temporal.action.briberyAction import BriberyAction
from BribeNet.bribery.temporal.action.multiBriberyAction import MultiBriberyAction
from BribeNet.graph.ratingGraph import DEFAULT_GEN, RatingGraph, BribersAreNotTupleException, NoBriberGivenException
from BribeNet.graph.static.ratingGraph import DEFAULT_NON_VOTER_PROPORTION  # (0.2)
from BribeNet.graph.temporal.action.customerAction import CustomerAction
from BribeNet.graph.temporal.weighting.traverseWeighting import assign_traverse_averaged
from BribeNet.helpers.bribeNetException import BribeNetException
from BribeNet.helpers.override import override

DEFAULT_REMOVE_NO_VOTE = False
DEFAULT_Q = 0.5
DEFAULT_PAY = 1.0
DEFAULT_APATHY = 0.0
DEFAULT_D = 2  # number of rounds in a cycle (D-1 bribes and then one customer round)
DEFAULT_TRUE_AVERAGE = 0.5
DEFAULT_TRUE_STD_DEV = 0.2
DEFAULT_LEARNING_RATE = 0.1

# recognised keyword arguments with their inclusive lower/upper bounds
KWARG_NAMES = ("non_voter_proportion", "remove_no_vote", "q", "pay", "apathy", "d", "learning_rate")
KWARG_LOWER_BOUNDS = dict(zip(KWARG_NAMES, (0, False, 0, 0, 0, 2, 0)))
KWARG_UPPER_BOUNDS = dict(zip(KWARG_NAMES, (1, True, 1, float('inf'), 1, maxsize, 1)))

MIN_TRUE_AVERAGE = 0.0
MAX_TRUE_AVERAGE = 1.0
MIN_TRUE_STD_DEV = 0.0
MAX_TRUE_STD_DEV = float('inf')


class BriberNotSubclassOfTemporalBriberException(BribeNetException):
    pass


class BriberKeywordArgumentOutOfBoundsException(BribeNetException):
    pass


class TrueAverageIncorrectShapeException(BribeNetException):
    pass


class TrueStdDevIncorrectShapeException(BribeNetException):
    pass


class TemporalRatingGraph(RatingGraph, abc.ABC):
    """
    Rating graph evolving over time: d-1 bribery rounds followed by one customer
    round per cycle, with trust weights updated after each customer round.
    """

    def __init__(self, bribers: Union[Tuple[Any], Any], generator=DEFAULT_GEN, **kwargs):
        """
        :param bribers: a single TemporalBriber or a tuple of TemporalBribers active on the network
        :param generator: the generator used to build the underlying customer graph
        :param kwargs: model parameters (see KWARG_NAMES) plus optional true_averages / true_std_devs arrays
        :raises BribersAreNotTupleException: if bribers is neither a TemporalBriber nor a tuple
        :raises NoBriberGivenException: if the tuple of bribers is empty
        :raises BriberNotSubclassOfTemporalBriberException: if any member is not a TemporalBriber
        """
        from BribeNet.bribery.temporal.briber import TemporalBriber
        if issubclass(bribers.__class__, TemporalBriber):
            # allow a bare briber as a convenience; normalise to a tuple
            bribers = tuple([bribers])
        if not isinstance(bribers, tuple):
            raise BribersAreNotTupleException("bribers must be a tuple of instances of subclasses of TemporalBriber")
        if not bribers:
            raise NoBriberGivenException("must be at least one briber")
        for b in bribers:
            if not issubclass(b.__class__, TemporalBriber):
                raise BriberNotSubclassOfTemporalBriberException(f"{b.__class__.__name__} is not a subclass of "
                                                                 "TemporalBriber")
        self.__tmp_bribers = bribers
        self.__tmp_kwargs = kwargs
        self._time_step: int = 0
        super().__init__(bribers, generator, specifics=self.__specifics, **kwargs)
        # must come after super().__init__() such that bribers[0] has graph set
        # (the original branched on len(bribers) == 1 here, but both branches were identical)
        self._last_bribery_actions: List[BriberyAction] = []
        self._last_customer_action: Optional[CustomerAction] = CustomerAction.empty_action(self)

    @staticmethod
    def kwarg_in_bounds(k, v):
        """
        :return: True iff value v lies within the inclusive bounds registered for kwarg k
        """
        return KWARG_LOWER_BOUNDS[k] <= v <= KWARG_UPPER_BOUNDS[k]

    def __specifics(self):
        """
        Deferred initialisation called back from RatingGraph.__init__ once the graph
        exists: validates kwargs, stores model parameters and generates truths/votes.
        """
        self._votes = np.zeros((self.get_graph().numberOfNodes(), len(self._bribers)))
        self._truths = np.zeros((self.get_graph().numberOfNodes(), len(self._bribers)))
        # validate every recognised kwarg that was actually supplied
        for kwarg in KWARG_NAMES:
            if kwarg in self.__tmp_kwargs:
                if not self.kwarg_in_bounds(kwarg, self.__tmp_kwargs[kwarg]):
                    raise BriberKeywordArgumentOutOfBoundsException(
                        f"{kwarg}={self.__tmp_kwargs[kwarg]} out of bounds ({KWARG_LOWER_BOUNDS[kwarg]}, "
                        f"{KWARG_UPPER_BOUNDS[kwarg]})")
        # Generate random ratings network; each parameter falls back to its module default
        non_voter_proportion = self.__tmp_kwargs.get("non_voter_proportion", DEFAULT_NON_VOTER_PROPORTION)
        # q is supplied as a fraction of the maximum rating
        self._q: float = self.__tmp_kwargs.get("q", DEFAULT_Q) * self._max_rating
        self._pay: float = self.__tmp_kwargs.get("pay", DEFAULT_PAY)
        self._apathy: float = self.__tmp_kwargs.get("apathy", DEFAULT_APATHY)
        self._d: int = self.__tmp_kwargs.get("d", DEFAULT_D)
        if "true_averages" in self.__tmp_kwargs:
            true_averages = self.__tmp_kwargs["true_averages"]
            if true_averages.shape[0] != len(self._bribers):
                raise TrueAverageIncorrectShapeException(f"{true_averages.shape[0]} != {len(self._bribers)}")
            if not np.all(true_averages >= MIN_TRUE_AVERAGE):
                raise BriberKeywordArgumentOutOfBoundsException(f"All true averages must be >= {MIN_TRUE_AVERAGE}")
            if not np.all(true_averages <= MAX_TRUE_AVERAGE):
                raise BriberKeywordArgumentOutOfBoundsException(f"All true averages must be <= {MAX_TRUE_AVERAGE}")
            self._true_averages: np.ndarray[float] = true_averages
        else:
            self._true_averages: np.ndarray[float] = np.repeat(DEFAULT_TRUE_AVERAGE, len(self._bribers))
        if "true_std_devs" in self.__tmp_kwargs:
            true_std_devs = self.__tmp_kwargs["true_std_devs"]
            if true_std_devs.shape[0] != len(self._bribers):
                raise TrueStdDevIncorrectShapeException(f"{true_std_devs.shape[0]} != {len(self._bribers)}")
            if not np.all(true_std_devs >= MIN_TRUE_STD_DEV):
                raise BriberKeywordArgumentOutOfBoundsException(f"All true std devs must be >= {MIN_TRUE_STD_DEV}")
            if not np.all(true_std_devs <= MAX_TRUE_STD_DEV):
                raise BriberKeywordArgumentOutOfBoundsException(f"All true std devs must be <= {MAX_TRUE_STD_DEV}")
            self._true_std_devs: np.ndarray[float] = true_std_devs
        else:
            self._true_std_devs: np.ndarray[float] = np.repeat(DEFAULT_TRUE_STD_DEV, len(self._bribers))
        self._learning_rate: float = self.__tmp_kwargs.get("learning_rate", DEFAULT_LEARNING_RATE)
        # generate locally-similar true opinions per briber, then make each customer a
        # voter with probability (1 - non_voter_proportion)
        community_weights = {}
        for b, _ in enumerate(self._bribers):
            community_weights[b] = assign_traverse_averaged(self.get_graph(), self._true_averages[b],
                                                            self._true_std_devs[b])
        for n in self.get_graph().iterNodes():
            for b, _ in enumerate(self._bribers):
                rating = community_weights[b][n]
                self._truths[n][b] = rating
                if random.random() > non_voter_proportion:
                    self._votes[n][b] = rating
                else:
                    self._votes[n][b] = np.nan
        self._time_step = 0
        del self.__tmp_bribers, self.__tmp_kwargs

    @override
    def _finalise_init(self):
        """
        Perform assertions that ensure everything is initialised
        """
        from BribeNet.bribery.temporal.briber import TemporalBriber
        for briber in self._bribers:
            if not issubclass(briber.__class__, TemporalBriber):
                raise BriberNotSubclassOfTemporalBriberException("member of graph bribers not an instance of a "
                                                                 "subclass of TemporalBriber")
        super()._finalise_init()

    def get_time_step(self):
        """:return: the current time step of the simulation"""
        return self._time_step

    def get_d(self):
        """:return: the cycle length d (d-1 bribery rounds, then one customer round)"""
        return self._d

    def get_last_bribery_actions(self):
        """:return: the bribery actions performed since the last customer round"""
        return self._last_bribery_actions

    def get_last_customer_action(self):
        """:return: the most recent customer action"""
        return self._last_customer_action

    @abc.abstractmethod
    def _customer_action(self) -> CustomerAction:
        """
        Perform the action of each customer in the graph
        """
        raise NotImplementedError

    def _bribery_action(self) -> MultiBriberyAction:
        """
        Collect every briber's next action into a single multi-briber action
        """
        actions = [b.next_action() for b in self._bribers]
        return MultiBriberyAction.make_multi_action_from_single_actions(actions)

    def _update_trust(self):
        """
        Update the weights of the graph based on the trust between nodes.
        """
        # Get the weights and calculate the new weights first.
        new_weights = {}
        for (u, v) in self.get_edges():
            prev_weight = self.get_weight(u, v)
            new_weight = prev_weight + self._learning_rate * (self.trust(u, v) - prev_weight)
            new_weights[(u, v)] = new_weight
        # Then set them, as some ratings systems could give different values
        # if the weights are modified during the calculations.
        for (u, v) in self.get_edges():
            self.set_weight(u, v, new_weights[(u, v)])

    def is_bribery_round(self):
        """
        :return: True in the first d-1 rounds of each cycle, False in the customer round
        """
        return not (self._time_step % self._d == self._d - 1)

    def step(self):
        """
        Perform the next step, either bribery action or customer action and increment the time step
        We do d-1 bribery steps (self._time_step starts at 0) and then a customer step.
        """
        if self.is_bribery_round():
            bribery_action = self._bribery_action()
            bribery_action.perform_action()
            self._last_bribery_actions.append(bribery_action)
        else:
            customer_action = self._customer_action()
            customer_action.perform_action(pay=self._pay)
            self._last_customer_action = customer_action
            self._last_bribery_actions = []
            self._update_trust()
        self._time_step += 1
/src/BribeNet/graph/temporal/thresholdGraph.py
import random
from typing import List

import numpy as np

from BribeNet.graph.ratingGraph import DEFAULT_GEN
from BribeNet.graph.temporal.action.actionType import ActionType
from BribeNet.graph.temporal.action.customerAction import CustomerAction
from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph, BriberKeywordArgumentOutOfBoundsException

DEFAULT_THRESHOLD = 0.5
MIN_THRESHOLD = 0.0
MAX_THRESHOLD = 1.0


class ThresholdGraph(TemporalRatingGraph):

    def __init__(self, bribers, generator=DEFAULT_GEN, **kwargs):
        """
        Threshold model for temporal rating graph

        :param bribers: the bribers active on the network
        :param generator: the generator to be used to generate the customer graph
        :param kwargs: additional parameters to the threshold temporal rating graph
        :keyword threshold: float - threshold for being considered
        :keyword remove_no_vote: bool - whether to allow non voted restaurants
        :keyword q: float - percentage of max rating given to non voted restaurants
        :keyword pay: float - the amount of utility gained by a restaurant when a customer visits
        :keyword apathy: float - the probability a customer does not visit any restaurant
        :raises BriberKeywordArgumentOutOfBoundsException: if threshold lies outside [MIN_THRESHOLD, MAX_THRESHOLD]
        """
        super().__init__(bribers, generator=generator, **kwargs)
        if "threshold" in kwargs:
            threshold = kwargs["threshold"]
            if not MIN_THRESHOLD <= threshold <= MAX_THRESHOLD:
                raise BriberKeywordArgumentOutOfBoundsException(
                    f"threshold={threshold} out of bounds ({MIN_THRESHOLD}, {MAX_THRESHOLD})")
            self._threshold: float = threshold
        else:
            self._threshold: float = DEFAULT_THRESHOLD

    def _customer_action(self):
        """
        Build this round's CustomerAction: each customer picks a restaurant at random
        with probability proportional to its rating, subject to the threshold, the
        no-vote fallback weight q, and apathy.

        :return: the CustomerAction for this round
        """
        # obtain customers ratings before any actions at this step, assumes all customers act simultaneously
        curr_ratings: List[List[float]] = [[self.get_rating(n, b.get_briber_id(), nan_default=0)
                                            for b in self._bribers]
                                           for n in self.get_customers()]
        # voted[n][b]: whether customer n has any voting neighbour for briber b
        voted: List[List[bool]] = [[len(self._neighbours(n, b.get_briber_id())) > 0
                                    for b in self._bribers]
                                   for n in self.get_customers()]
        action = CustomerAction(self)
        # carry forward the BRIBED markers from the bribery rounds of this cycle
        for bribery_action in self._last_bribery_actions:
            action.set_bribed_from_bribery_action(bribery_action)
        # for each customer
        for n in self.get_graph().iterNodes():
            # get weightings for restaurants
            # 0 if below_threshold, q if no votes
            weights = np.zeros(len(self._bribers))
            for b in range(0, len(self._bribers)):
                # Check for no votes
                if not voted[n][b]:
                    weights[b] = self._q
                # P-rating below threshold
                elif curr_ratings[n][b] < self._threshold:
                    weights[b] = 0
                # Else probability proportional to P-rating
                else:
                    weights[b] = curr_ratings[n][b]
            # no restaurants above threshold so no action for this customer
            if np.count_nonzero(weights) == 0:
                continue
            # select at random
            selected = random.choices(range(0, len(self._bribers)), weights=weights)[0]
            if random.random() >= self._apathy:  # has no effect by default (DEFAULT_APATHY = 0.0)
                if action.get_action_type(n) == ActionType.NONE:  # if not already selected or bribed
                    action.set_select(n, selected)
        return action
/src/BribeNet/graph/temporal/weighting/communityWeighting.py
import random

# noinspection PyUnresolvedReferences
from networkit import Graph
# noinspection PyUnresolvedReferences
from networkit.community import PLM


def get_communities(graph: Graph) -> [[int]]:
    """
    Gets the underlying communities of the graph, as sets of nodes.
    """
    communities = PLM(graph, refine=False).run().getPartition()
    return [communities.getMembers(i) for i in communities.getSubsetIds()]


def gauss_constrained(mean: float, std: float) -> float:
    """Sample from N(mean, std**2), clamped into [0, 1]."""
    return max(0, min(1, random.gauss(mean, std)))


def get_std_dev(total_size: int, comm_size: int) -> float:
    """
    In community generation, larger communities should have a smaller standard deviation
    (representing tighter-knit communities). This generates a std dev based on the ratio of
    the number of nodes in this community to the number of nodes in the total graph.
    Since we want a larger standard deviation for a smaller ratio, we take 1/ratio, which
    goes from total_size (for comm_size=1) to 1 (for ratio = 1). We divide this by
    total_size to get a normalised value, and then by 3 so that we can easily go three
    standard deviations without leaving the range.
    """
    ratio = comm_size / total_size  # range 0 to 1.
    return (1 / ratio) / (total_size * 3)


def assign_community_weights(graph: Graph, mean: float, std_dev: float = 0.05) -> [float]:
    """
    For each community, assign it a mean and then give values within it a normally
    distributed random value with that mean and standard deviation proportional to
    community size.
    """
    weights = [0 for _ in graph.iterNodes()]
    communities = get_communities(graph)
    # (debug print of the communities removed)
    total_size = len(weights)
    for community in communities:
        comm_size = len(community)
        comm_mean = gauss_constrained(mean, std_dev)
        if comm_size == 1:
            # BUG FIX: Partition.getMembers returns a set, which does not support
            # indexing; next(iter(...)) is equivalent for sequences and correct for sets
            # noinspection PyTypeChecker
            weights[next(iter(community))] = comm_mean
        else:
            for node in community:
                # noinspection PyTypeChecker
                # manually verified to be correct typing (rob)
                weights[node] = gauss_constrained(comm_mean, get_std_dev(total_size, comm_size))
    return weights
/src/BribeNet/graph/temporal/weighting/traverseWeighting.py
from random import gauss

# noinspection PyUnresolvedReferences
from networkit import Graph
from numpy import mean as average


def assign_traverse_averaged(graph: Graph, mean: float, std_dev: float = 0.2) -> [float]:
    """
    Assign node 0 with the mean. Then assign all of its neighbours with a value close to
    that mean (weight + N(0, std_dev)), then their neighbours and so on.
    By properties of normals, every node has weight ~ N(mean, x * (std_dev**2)) where x is
    the shortest distance from node 0, but nodes that are linked share very similar
    weights. Locally similar, globally variable.
    This version allows nodes with already assigned weights to be affected, by tracking
    each weight as a set and using its average.
    """
    # one list of contributed samples per node; node 0 is seeded with the target mean
    weight_sets = [[] for _ in graph.iterNodes()]
    weight_sets[0] = [mean]
    nodeset = [0]  # FIFO frontier of nodes whose neighbours still need contributions
    while len(nodeset) > 0:
        node = nodeset[0]
        nodeset = nodeset[1:]  # NOTE(review): O(n) pop-front; a deque would be faster, behaviour identical
        for neighbour in graph.neighbors(node):
            # enqueue a neighbour only on its first contribution, but keep adding
            # contributions on later visits (they are averaged below)
            if len(weight_sets[neighbour]) == 0:
                nodeset.append(neighbour)
            weight_sets[neighbour].append(average(weight_sets[node]) + gauss(0, std_dev))
    weights = [average(weight_sets[i]) for i in range(len(weight_sets))]
    # rescale so the global average matches the requested mean, then clamp into [0, 1]
    avg_weight = average(weights)
    return [min(1, max(0, weights[i] * mean / avg_weight)) for i in range(len(weights))]


def assign_traverse_weights(graph: Graph, mean: float, std_dev: float = 0.05) -> [float]:
    """
    Assign node 0 with the mean. Then assign all of its neighbours with a value close to
    that mean (weight + N(0, std_dev)), then their neighbours and so on.
    By properties of normals, every node has weight ~ N(mean, x * (std_dev**2)) where x is
    the shortest distance from node 0, but nodes that are linked share very similar
    weights. Locally similar, globally variable.
    """
    weights = [-1 for _ in graph.iterNodes()]  # -1 marks "not yet assigned"
    # noinspection PyTypeChecker
    weights[0] = mean
    nodeset = [0]  # FIFO frontier for the breadth-first traversal
    while len(nodeset) > 0:
        node = nodeset[0]
        nodeset = nodeset[1:]
        for neighbour in graph.neighbors(node):
            if weights[neighbour] == -1:
                weights[neighbour] = weights[node] + gauss(0, std_dev)
                nodeset.append(neighbour)
    # rescale so the global average matches the requested mean, then clamp into [0, 1]
    avg_weight = average(weights)
    return [min(1, max(0, weights[i] * mean / avg_weight)) for i in range(len(weights))]
/src/BribeNet/gui/apps/main.py
import tkinter as tk class Main(tk.Frame): """ Frame for the main menu of the GUI """ def __init__(self, master, *args, **kwargs): tk.Frame.__init__(self, master=master, *args, **kwargs) title_text = tk.Label(self, text="Bribery Networks", font=("Calibri", 16, "bold"), pady=20) title_text.pack() static_button = tk.Button(self, text="Static Model", command=self.master.show_static_gui, pady=10) static_button.pack(pady=10) temporal_button = tk.Button(self, text="Temporal Model", command=self.master.show_temporal_gui, pady=10) temporal_button.pack()
/src/BribeNet/gui/apps/static/briber_wizard/frame.py
import tkinter as tk class StaticBriberWizardFrame(tk.Frame): """ Frame for pop-up wizard for adding a static briber """ pass
/src/BribeNet/gui/apps/static/briber_wizard/window.py
import tkinter as tk class StaticBriberWizardWindow(tk.Toplevel): """ Window for pop-up wizard for adding a static briber """ pass
/src/BribeNet/gui/apps/static/graph.py
import tkinter as tk

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.colors import rgb2hex
from networkit.nxadapter import nk2nx
from networkit.viztasks import drawGraph
from networkx import spring_layout


class GraphFrame(tk.Frame):
    """
    Frame for showing the current state and actions that can be taken for the static model being run
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.fig = plt.figure(figsize=(8, 8))
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self)
        self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        self.results = []
        self.graph = None
        self.pos = None
        self.briber = None
        button1 = tk.Button(self, text="Exit", command=lambda: self.master.show_frame("WizardFrame"))
        button1.pack()
        button2 = tk.Button(self, text="Show Influential Nodes", command=self.show_influential)
        button2.pack()
        button3 = tk.Button(self, text="Bribe", command=self.next_bribe)
        button3.pack()
        button4 = tk.Button(self, text="Results", command=self.to_results)
        button4.pack()
        self.txt = tk.StringVar()
        lbl = tk.Label(self, textvariable=self.txt)
        lbl.pack()
        self.txt.set("Average P-Rating: -- \nLast Briber: --")

    def set_graph(self, graph, briber):
        """
        Attach a rating graph and briber, lay out the nodes once and draw the initial state.

        :param graph: the StaticRatingGraph to display
        :param briber: the briber driving the bribes
        """
        self.graph = graph
        self.pos = spring_layout(nk2nx(self.graph.get_graph()))
        self.briber = briber
        self.results.append(self.graph.eval_graph())
        self.display_graph()

    def to_results(self):
        """Hand the collected results to the results frame and switch to it."""
        self.master.plot_results(self.results)
        self.results = []
        self.master.show_frame("ResultsFrame")

    def display_graph(self, last=None):
        """
        Redraw the graph, colouring voters by their vote (non-voters gray) and
        annotating each node with its rating and P-rating.

        :param last: the most recently bribed node, circled in red if not None
        """
        cmap = plt.get_cmap("Purples")
        colors = []
        for c in self.graph.get_customers():
            if np.isnan(self.graph.get_vote(c)):
                colors.append("gray")
            else:
                colors.append(rgb2hex(cmap(self.graph.get_vote(c)[0])[:3]))
        self.ax.clear()
        drawGraph(self.graph.get_graph(), node_size=400, node_color=colors, ax=self.ax, pos=self.pos)
        for c in self.graph.get_customers():
            if np.isnan(self.graph.get_vote(c)):
                rating = "None"
            else:
                rating = round(self.graph.get_vote(c)[0], 2)
            self.ax.annotate(
                str(c) + ":\n" + "Rating: " + str(rating) + "\n" +
                "PRating: " + str(round(self.graph.get_rating(c), 2)),
                xy=(self.pos[c][0], self.pos[c][1]),
                bbox=dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
            )
        if last is not None:
            self.ax.add_artist(plt.Circle(
                (self.pos[last][0], self.pos[last][1]), 0.1,
                color="r", fill=False, linewidth=3.0
            ))
        self.canvas.draw()
        avp = str(round(self.graph.eval_graph(), 2))
        # BUG FIX: the branches were inverted — "--" was shown exactly when a node
        # HAD just been bribed, and "None" when nothing had been bribed
        if last is None:
            self.txt.set("Average P-Rating: " + avp + " \nLast Bribed: --")
        else:
            self.txt.set("Average P-Rating: " + avp + " \nLast Bribed: " + str(last))

    def next_bribe(self):
        """Let the briber perform its next bribe, then redraw and record the result."""
        c = self.briber.next_bribe()
        self.display_graph(last=c)
        avp = self.graph.eval_graph()
        self.results.append(avp)
        self.canvas.draw()

    def show_influential(self):
        """Redraw the graph with influential nodes highlighted in yellow."""
        cmap = plt.get_cmap("Purples")
        colors = []
        for c in self.graph.get_customers():
            # charge_briber=False: query influence without spending briber utility
            if self.graph.is_influential(c, charge_briber=False):
                colors.append("yellow")
            elif np.isnan(self.graph.get_vote(c)):
                colors.append("gray")
            else:
                colors.append(rgb2hex(cmap(self.graph.get_vote(c)[0])[:3]))
        self.ax.clear()
        for c in self.graph.get_customers():
            if np.isnan(self.graph.get_vote(c)):
                rating = "None"
            else:
                rating = round(self.graph.get_vote(c)[0], 2)
            self.ax.annotate(
                str(c) + ":\n" + "Rating: " + str(rating) + "\n" +
                "PRating: " + str(round(self.graph.get_rating(c), 2)),
                xy=(self.pos[c][0], self.pos[c][1]),
                bbox=dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
            )
        drawGraph(self.graph.get_graph(), node_size=500, node_color=colors, ax=self.ax, pos=self.pos)
        self.canvas.draw()
/src/BribeNet/gui/apps/static/result.py
import tkinter as tk

import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg


class ResultsFrame(tk.Frame):
    """
    Frame for showing the current results of the static model being run.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # embedded matplotlib figure for the results plot
        self.fig = plt.figure(figsize=(8, 8))
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self)
        self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        exit_button = tk.Button(self, text="Exit", command=self.exit)
        exit_button.pack()
        self.results = []

    def plot_results(self, results):
        """Plot average P-rating against move number."""
        move_numbers = list(range(len(results)))
        self.ax.clear()
        self.ax.plot(move_numbers, results)
        self.ax.set_xlabel("Moves over time")
        self.ax.set_ylabel("Average P-rating")
        self.canvas.draw()

    def exit(self):
        """Discard stored results and return to the wizard page."""
        self.results = []
        self.master.show_frame("WizardFrame")
/src/BribeNet/gui/apps/static/static.py
import tkinter as tk

from BribeNet.bribery.static.oneMoveInfluentialNodeBriber import OneMoveInfluentialNodeBriber
from BribeNet.bribery.static.oneMoveRandomBriber import OneMoveRandomBriber
from BribeNet.graph.generation import GraphGeneratorAlgo
from BribeNet.graph.generation.flatWeightGenerator import FlatWeightedGraphGenerator
from BribeNet.graph.static.ratingGraph import StaticRatingGraph
from BribeNet.gui.apps.static.graph import GraphFrame
from BribeNet.gui.apps.static.result import ResultsFrame
from BribeNet.gui.apps.static.wizard.wizard import WizardFrame
from BribeNet.helpers.override import override

FRAMES_CLASSES = [WizardFrame, GraphFrame, ResultsFrame]
# BUG FIX: FRAMES_CLASSES holds classes, not instances, so c.__class__.__name__
# evaluated to 'type' for every entry; c.__name__ yields the real frame names.
FRAMES_DICT = {i: c.__name__ for (i, c) in enumerate(FRAMES_CLASSES)}


def switch_briber(argument):
    """Return a zero-argument factory for the briber identified by argument.

    :param argument: "r" for random, "i" for influential-node bribery
    :return: callable building the briber (None for an unknown key)
    """
    switcher = {
        "r": lambda: OneMoveRandomBriber(10),
        "i": lambda: OneMoveInfluentialNodeBriber(10)
    }
    return switcher.get(argument)


class StaticGUI(tk.Toplevel):
    """
    Window for the static wizard and running environment.
    """

    def __init__(self, controller, *args, **kwargs):
        super().__init__(controller, *args, **kwargs)
        self.title("Static Model")
        self.controller = controller
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        # one instance of each page, stacked in the same grid cell
        self.frames = {}
        for F in FRAMES_CLASSES:
            page_name = F.__name__
            frame = F(parent=self, controller=controller)
            self.frames[page_name] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame("WizardFrame")

    def show_frame(self, page):
        """Raise the page named by its frame class name to the top of the stack."""
        frame = self.frames[page]
        frame.tkraise()

    def generate_graph(self, gtype, btype):
        """Build a StaticRatingGraph of the chosen type and install it.

        :param gtype: "ba" (Barabási-Albert), "cg" (composite) or anything
                      else for the StaticRatingGraph default generator
        :param btype: briber key understood by switch_briber
        """
        briber = switch_briber(btype)()
        ba_gen = FlatWeightedGraphGenerator(GraphGeneratorAlgo.BARABASI_ALBERT, 5, 30, 0, True)
        comp_gen = FlatWeightedGraphGenerator(GraphGeneratorAlgo.COMPOSITE, 50, 5, 2, 0.1, 3, 0.05)
        if gtype == "ba":
            rg = StaticRatingGraph(briber, generator=ba_gen)
        elif gtype == "cg":
            rg = StaticRatingGraph(briber, generator=comp_gen)
        else:
            rg = StaticRatingGraph(briber)
        self.frames["GraphFrame"].set_graph(rg, briber)

    def plot_results(self, results):
        """Forward accumulated results to the results page."""
        self.frames["ResultsFrame"].plot_results(results)

    @override
    def destroy(self):
        # hand control back to the main menu when this window closes
        if self.controller is not None:
            self.controller.show_main()
        super().destroy()


if __name__ == '__main__':
    app = StaticGUI(None)
    app.mainloop()
/src/BribeNet/gui/apps/static/wizard/algos/barabasi_albert.py
import tkinter as tk

from BribeNet.gui.classes.param_list_frame import ParamListFrame


class BarabasiAlbert(ParamListFrame):
    """Parameter-entry frame for the Barabási-Albert generation algorithm."""

    # display name keyed on by the generation dropdown
    name = "Barabási-Albert"

    def __init__(self, parent):
        super().__init__(parent)
        # tk variables holding the generator arguments; insertion order is the
        # order the fields are laid out by grid_params()
        self.params = {
            'k': tk.DoubleVar(self, value=5),
            'n_max': tk.IntVar(self, value=30),
            'n_0': tk.IntVar(self, value=0)
        }
        # human-readable help text shown beside each parameter
        self.descriptions = {
            'k': 'number of attachments per node',
            'n_max': 'number of nodes in the graph',
            'n_0': 'number of connected nodes to begin with'
        }
        self.grid_params(show_name=False)
/src/BribeNet/gui/apps/static/wizard/algos/composite.py
import tkinter as tk

from BribeNet.gui.classes.param_list_frame import ParamListFrame


class Composite(ParamListFrame):
    """Parameter-entry frame for the composite (small-world communities joined
    by a Barabási-Albert parent graph) generation algorithm."""

    # display name keyed on by the generation dropdown
    name = "Composite"

    def __init__(self, parent):
        super().__init__(parent)
        # tk variables holding the generator arguments; insertion order is the
        # order the fields are laid out by grid_params()
        self.params = {
            'n_nodes': tk.IntVar(self, value=50),
            'n_communities': tk.IntVar(self, value=5),
            'n_neighbours': tk.IntVar(self, value=2),
            'p_rewiring': tk.DoubleVar(self, value=0.3),
            'k': tk.DoubleVar(self, value=3),
            'p_reduce': tk.DoubleVar(self, value=0.05)
        }
        # human-readable help text shown beside each parameter
        self.descriptions = {
            'n_nodes': 'number of nodes in the graph',
            'n_communities': 'how many small world networks the composite network should consist of',
            'n_neighbours': 'how many neighbours each node should have at the start of small world generation (k from '
                            'Watts-Strogatz)',
            'p_rewiring': 'the probability of rewiring a given edge during small world network generation (p from '
                          'Watts-Strogatz)',
            'k': 'number of attachments per community (k for Barabasi-Albert for our parent graph)',
            'p_reduce': "how much the probability of joining two nodes in two different communities is reduced by - "
                        "once a successful connection is made, the probability of connecting two edges p' becomes p' "
                        "* probability_reduce "
        }
        self.grid_params(show_name=False)
/src/BribeNet/gui/apps/static/wizard/algos/watts_strogatz.py
import tkinter as tk

from BribeNet.gui.classes.param_list_frame import ParamListFrame


class WattsStrogatz(ParamListFrame):
    """Parameter-entry frame for the Watts-Strogatz generation algorithm."""

    # display name keyed on by the generation dropdown
    name = "Watts-Strogatz"

    def __init__(self, parent):
        super().__init__(parent)
        # tk variables holding the generator arguments; insertion order is the
        # order the fields are laid out by grid_params()
        self.params = {
            'n_nodes': tk.IntVar(self, value=30),
            'n_neighbours': tk.IntVar(self, value=5),
            'p': tk.DoubleVar(self, value=0.3)
        }
        # human-readable help text shown beside each parameter
        self.descriptions = {
            'n_nodes': 'number of nodes in the graph',
            'n_neighbours': 'number of neighbors on each side of a node',
            'p': 'the probability of rewiring a given edge'
        }
        self.grid_params(show_name=False)
/src/BribeNet/gui/apps/static/wizard/generation.py
import tkinter as tk

from BribeNet.gui.apps.static.wizard.algos.barabasi_albert import BarabasiAlbert
from BribeNet.gui.apps.static.wizard.algos.composite import Composite
from BribeNet.gui.apps.static.wizard.algos.watts_strogatz import WattsStrogatz

ALGO_SUBFRAMES = (BarabasiAlbert, Composite, WattsStrogatz)
ALGO_DICT = {v: k for k, v in enumerate([a.name for a in ALGO_SUBFRAMES])}


class StaticGeneration(tk.Frame):
    """Frame for choosing a graph generation algorithm and its parameters."""

    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.graph_type = tk.StringVar(self)
        title_label = tk.Label(self, text='Graph Generation Algorithm')
        title_label.grid(row=0, column=0, pady=10)
        # one parameter sub-frame per algorithm, all stacked in the same cell
        self.subframes = tuple(algo(self) for algo in ALGO_SUBFRAMES)
        self.options = tuple(subframe.get_name() for subframe in self.subframes)
        self.dropdown = tk.OptionMenu(self, self.graph_type, *self.options)
        self.dropdown.grid(row=1, column=0, pady=10, sticky='nsew')
        self.graph_type.set(self.options[0])
        for subframe in self.subframes:
            subframe.grid(row=2, column=0, sticky="nsew")
        # trace registered only after the initial set() so it does not fire early
        self.graph_type.trace('w', self.switch_frame)
        self.show_subframe(0)

    def show_subframe(self, page_no):
        """Raise the parameter sub-frame at index page_no."""
        self.subframes[page_no].tkraise()

    # noinspection PyUnusedLocal
    def switch_frame(self, *args):
        """Trace callback: raise the sub-frame matching the dropdown choice."""
        self.show_subframe(ALGO_DICT[self.graph_type.get()])

    def get_args(self):
        """Return the argument tuple of the currently selected algorithm."""
        return self.subframes[ALGO_DICT[self.graph_type.get()]].get_args()

    def get_graph_type(self):
        """Return the display name of the currently selected algorithm."""
        return self.graph_type.get()
/src/BribeNet/gui/apps/static/wizard/wizard.py
import tkinter as tk


class WizardFrame(tk.Frame):
    """
    Frame for the wizard to construct a static model run.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        # radio-button variables; "L" is a placeholder meaning "nothing chosen"
        gtype = tk.StringVar(parent)
        gtype.set("L")
        btype = tk.StringVar(parent)
        btype.set("L")
        # graph generator choices in column 0
        graph_choices = (("ws", "Watts-Strogatz"),
                         ("ba", "Barabási–Albert"),
                         ("cg", "Composite Generator"))
        for row, (value, text) in enumerate(graph_choices):
            tk.Radiobutton(self, variable=gtype, value=value, text=text).grid(row=row, column=0)
        # briber strategy choices in column 1
        briber_choices = (("r", "Random"), ("i", "Influential"))
        for row, (value, text) in enumerate(briber_choices):
            tk.Radiobutton(self, variable=btype, value=value, text=text).grid(row=row, column=1)
        go_button = tk.Button(self, text="Graph + Test",
                              command=lambda: self.on_button(gtype.get(), btype.get()))
        go_button.grid(row=1, column=2)

    def on_button(self, gtype, btype):
        """Ask the owning window to build the chosen model, then show it."""
        self.master.generate_graph(gtype, btype)
        self.master.show_frame("GraphFrame")
/src/BribeNet/gui/apps/temporal/briber_wizard/frame.py
import tkinter as tk

from BribeNet.gui.apps.temporal.briber_wizard.strategies.budget import BudgetFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.even import EvenFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.influential import InfluentialFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.most_influential import MostInfluentialFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.non import NonFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.p_greedy import PGreedyFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.random import RandomFrame

STRAT_SUBFRAMES = (NonFrame, RandomFrame, InfluentialFrame, MostInfluentialFrame, EvenFrame, BudgetFrame, PGreedyFrame)
STRAT_DICT = {v: k for k, v in enumerate([a.name for a in STRAT_SUBFRAMES])}


class TemporalBriberWizardFrame(tk.Frame):
    """
    Frame for pop-up wizard for adding a temporal briber.
    """

    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.strat_type = tk.StringVar(self)
        # one parameter sub-frame per bribery strategy, stacked in one cell
        self.subframes = tuple(strategy(self) for strategy in STRAT_SUBFRAMES)
        self.options = tuple(subframe.get_name() for subframe in self.subframes)
        self.dropdown = tk.OptionMenu(self, self.strat_type, *self.options)
        self.dropdown.grid(row=0, column=0)
        self.strat_type.set(self.options[0])
        for subframe in self.subframes:
            subframe.grid(row=1, column=0, sticky="nsew", pady=20)
        # trace registered only after the initial set() so it does not fire early
        self.strat_type.trace('w', self.switch_frame)
        self.show_subframe(0)
        self.submit_button = tk.Button(self, text="Submit", command=self.add_briber)
        self.submit_button.grid(row=2, column=0)

    def show_subframe(self, page_no):
        """Raise the strategy sub-frame at index page_no."""
        self.subframes[page_no].tkraise()

    # noinspection PyUnusedLocal
    def switch_frame(self, *args):
        """Trace callback: raise the sub-frame for the selected strategy."""
        self.show_subframe(STRAT_DICT[self.strat_type.get()])

    def get_args(self):
        """Return the argument tuple of the selected strategy's sub-frame."""
        return self.subframes[STRAT_DICT[self.strat_type.get()]].get_args()

    def get_graph_type(self):
        """Return the display name of the selected strategy."""
        return self.strat_type.get()

    def add_briber(self):
        """Register the configured briber with the owning window, then close it."""
        self.parent.controller.add_briber(self.get_graph_type(), *(self.get_args()))
        self.parent.destroy()
/src/BribeNet/gui/apps/temporal/briber_wizard/strategies/p_greedy.py
import tkinter as tk

from BribeNet.gui.classes.param_list_frame import ParamListFrame


class PGreedyFrame(ParamListFrame):
    """Parameter-entry frame for the P-Greedy bribery strategy."""

    # display name keyed on by the strategy dropdown
    name = "P-Greedy"

    def __init__(self, parent):
        super().__init__(parent)
        # tk variables holding the briber arguments; insertion order is the
        # order the fields are laid out by grid_params()
        self.params = {
            'u_0': tk.DoubleVar(self, value=10),
            'true_average': tk.DoubleVar(self, value=0.5),
            'true_std_dev': tk.DoubleVar(self, value=0.2)
        }
        # human-readable help text shown beside each parameter
        self.descriptions = {
            'u_0': 'starting budget',
            'true_average': 'the average of customer ground truth for this briber',
            'true_std_dev': 'the standard deviation of customer ground truth for this briber'
        }
        self.grid_params(show_name=False)
/src/BribeNet/gui/apps/temporal/briber_wizard/window.py
import tkinter as tk

from BribeNet.gui.apps.temporal.briber_wizard.frame import TemporalBriberWizardFrame
from BribeNet.helpers.override import override


class TemporalBriberWizardWindow(tk.Toplevel):
    """
    Window for pop-up wizard for adding a temporal briber.
    """

    def __init__(self, controller):
        super().__init__(controller)
        self.title("Briber Wizard")
        self.controller = controller
        self.frame = TemporalBriberWizardFrame(self)
        self.frame.pack(pady=10, padx=10)

    @override
    def destroy(self):
        # clear the owner's reference so the wizard can be reopened later
        self.controller.briber_wizard = None
        super().destroy()
/src/BribeNet/gui/apps/temporal/graph.py
import tkinter as tk

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.colors import rgb2hex
from networkit.viztasks import drawGraph

from BribeNet.gui.apps.temporal.results_wizard.window import TemporalResultsWizardWindow


class GraphFrame(tk.Frame):
    """
    Frame for showing the current state and actions that can be taken for the temporal model being run
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.parent = parent
        # matplotlib figure embedded on the left of the grid
        self.fig = plt.figure(figsize=(8, 8))
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self)
        self.grid_rowconfigure(1, weight=1)
        self.canvas.get_tk_widget().grid(row=1, column=0, rowspan=10)
        self.results = []
        self.pos = None  # node layout, supplied via set_pos once a graph exists
        self.gamma = None
        self.briber_buttons = None
        self.briber_name_to_index = None  # populated by add_briber_dropdown
        self.rating_string_var = None     # populated by add_briber_dropdown
        # simulation control buttons (right-hand column)
        step_button = tk.Button(self, text="Next Step", command=self.controller.next_step)
        step_button.grid(row=3, column=2, sticky='nsew')
        results_button = tk.Button(self, text="Results", command=self.show_results_wizard)
        results_button.grid(row=4, column=2, sticky='nsew')
        exit_button = tk.Button(self, text="Exit", command=self.return_to_wizard)
        exit_button.grid(row=7, column=2, sticky='nsew')
        # slider selecting how many steps the "Perform n steps" button runs
        steps_slide = tk.Scale(self, from_=1, to=100, orient=tk.HORIZONTAL)
        steps_slide.grid(row=6, column=2, sticky='nsew')
        n_steps_button = tk.Button(self, text="Perform n steps", command=lambda: self.n_steps(steps_slide.get()))
        n_steps_button.grid(row=5, column=2, sticky='nsew')
        # scrollable description of what happened in the last round
        self.info = tk.StringVar(parent)
        round_desc_canvas = tk.Canvas(self)
        round_desc_scroll = tk.Scrollbar(self, orient='vertical', command=round_desc_canvas.yview)
        round_desc_frame = tk.Frame(self)
        # keep the scrollregion in sync with the label's size
        round_desc_frame.bind(
            "<Configure>",
            lambda e: round_desc_canvas.configure(
                scrollregion=round_desc_canvas.bbox("all")
            )
        )
        round_desc_canvas.create_window((0, 0), window=round_desc_frame, anchor="n")
        round_desc_canvas.config(yscrollcommand=round_desc_scroll.set)
        round_desc_label = tk.Label(round_desc_frame, textvariable=self.info)
        round_desc_label.pack(fill=tk.BOTH, expand=1)
        round_desc_canvas.grid(row=1, column=1, columnspan=2, pady=10, padx=10, sticky='nsew')
        round_desc_scroll.grid(row=1, column=2, pady=10, sticky='nse')
        self.info.set("--")

    def return_to_wizard(self):
        """Reset run state and go back to the wizard page."""
        self.results = []
        self.info.set("--")
        self.controller.clear_graph()
        self.controller.show_frame("WizardFrame")

    def set_info(self, s):
        """Set the round-description text."""
        self.info.set(s)

    def set_pos(self, pos):
        """Set the fixed node layout used for all redraws."""
        self.pos = pos

    def n_steps(self, n):
        """Advance the simulation by n steps."""
        for i in range(0, n):
            self.controller.next_step()

    def add_briber_dropdown(self):
        """Build the per-briber view dropdown once bribers are known."""
        view_title_label = tk.Label(self, text="View rating for briber")
        view_title_label.grid(row=3, column=1)
        rating_choices = ['None'] + self.controller.briber_names
        self.briber_name_to_index = {v: k for k, v in enumerate(self.controller.briber_names)}
        self.rating_string_var = tk.StringVar(self)
        self.rating_string_var.set('None')
        rating_dropdown = tk.OptionMenu(self, self.rating_string_var, *rating_choices)

        # noinspection PyUnusedLocal
        def change_dropdown(*args):
            # 'None' shows the plain graph; otherwise the selected briber's view
            var_val = self.rating_string_var.get()
            if var_val == 'None':
                self.draw_basic_graph(self.controller.g)
            else:
                self.draw_briber_graph(self.briber_name_to_index[var_val])

        self.rating_string_var.trace('w', change_dropdown)
        rating_dropdown.grid(row=4, column=1, sticky='nsew')
        trust_button = tk.Button(self, text="Show Trust", command=lambda: self.show_trust(self.controller.g))
        trust_button.grid(row=6, column=1, sticky='nsew')

    def show_results_wizard(self):
        """Open (and raise) the axis-selection wizard for plotting results."""
        results_wizard = TemporalResultsWizardWindow(self.controller, self.controller.results)
        results_wizard.lift()

    def draw_basic_graph(self, graph):
        """Draw the graph with uniform node/edge colours."""
        colours = ["gray" for _ in graph.get_customers()]  # nodes
        edge_colours = ["#000000" for _ in graph.get_edges()]  # edges
        self._update_graph(graph, colours, edge_colours)
        self.canvas.draw()

    def draw_briber_graph(self, b):
        """Draw the graph coloured by customers' votes for briber index b."""
        # node colours
        graph = self.controller.g
        colour_map = plt.get_cmap("Purples")
        colours = []
        for c in graph.get_customers():
            if np.isnan(graph.get_vote(c)[b]):
                colours.append("gray")
            else:
                colours.append(rgb2hex(colour_map(graph.get_vote(c)[b])[:3]))
        edge_colours = ["#000000" for _ in graph.get_edges()]  # edges
        self._update_graph(graph, colours, edge_colours)
        self._add_annotations(b)
        self.canvas.draw()

    def _update_graph(self, graph, colours, edge_colours):
        """Clear the axes and redraw the graph with the given colour lists."""
        self.ax.clear()
        drawGraph(
            graph.get_graph(),
            node_size=400,
            node_color=colours,
            edge_color=edge_colours,
            ax=self.ax,
            pos=self.pos,
            with_labels=True
        )

    def _add_annotations(self, b):
        """Annotate each customer with its vote for briber b and its rating."""
        graph = self.controller.g
        for c in graph.get_customers():
            if np.isnan(graph.get_vote(c)[b]):
                rating = "None"
            else:
                rating = round(graph.get_vote(c)[b], 2)
            self.ax.annotate(
                str(c) + ":\n" + "Vote: " + str(rating) + "\n" + "Rating: " + str(round(graph.get_rating(c), 2)),
                xy=(self.pos[c][0], self.pos[c][1]),
                bbox=dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
            )

    def show_trust(self, graph):
        """Draw the graph with edges shaded by their trust weight."""
        colours = ["gray" for _ in graph.get_customers()]  # nodes
        colour_map = plt.get_cmap("Greys")
        edge_colours = []
        for (u, v) in graph.get_edges():
            edge_colours.append(rgb2hex(colour_map(graph.get_weight(u, v))[:3]))
        self._update_graph(graph, colours, edge_colours)
        self.canvas.draw()
/src/BribeNet/gui/apps/temporal/main.py
import tkinter as tk
import os

from networkit.nxadapter import nk2nx
from networkx import spring_layout

from BribeNet.bribery.temporal.budgetNodeBriber import BudgetNodeBriber
from BribeNet.bribery.temporal.influentialNodeBriber import InfluentialNodeBriber
from BribeNet.bribery.temporal.mostInfluentialNodeBriber import MostInfluentialNodeBriber
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.bribery.temporal.oneMoveEvenBriber import OneMoveEvenBriber
from BribeNet.bribery.temporal.oneMoveRandomBriber import OneMoveRandomBriber
from BribeNet.bribery.temporal.pGreedyBriber import PGreedyBriber
from BribeNet.graph.generation import GraphGeneratorAlgo
from BribeNet.graph.generation.flatWeightGenerator import FlatWeightedGraphGenerator
from BribeNet.graph.temporal.action.actionType import ActionType
from BribeNet.graph.temporal.thresholdGraph import ThresholdGraph
from BribeNet.gui.apps.static.wizard.algos.barabasi_albert import BarabasiAlbert
from BribeNet.gui.apps.static.wizard.algos.composite import Composite
from BribeNet.gui.apps.temporal.briber_wizard.strategies.budget import BudgetFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.even import EvenFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.influential import InfluentialFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.most_influential import MostInfluentialFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.non import NonFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.p_greedy import PGreedyFrame
from BribeNet.gui.apps.temporal.briber_wizard.strategies.random import RandomFrame
from BribeNet.gui.apps.temporal.graph import GraphFrame
from BribeNet.gui.apps.temporal.result import ResultsFrame
from BribeNet.gui.apps.temporal.results_wizard.results import ResultsStore
from BribeNet.gui.apps.temporal.wizard.wizard import WizardFrame
from BribeNet.helpers.override import override

FRAMES_CLASSES = (WizardFrame, GraphFrame, ResultsFrame)
# BUG FIX: FRAMES_CLASSES holds classes, not instances, so c.__class__.__name__
# evaluated to 'type' for every entry; c.__name__ yields the real frame names.
FRAMES_DICT = {i: c.__name__ for (i, c) in enumerate(FRAMES_CLASSES)}

X_AXIS_OPTIONS = ("Time", "Utility Spent")
Y_AXIS_OPTIONS = ("Average Rating", "Total Utility", "Average Trust")


def switch_briber(strategy_type, *args):
    """Construct the briber implementation named by strategy_type with *args."""
    switcher = {
        RandomFrame.name: OneMoveRandomBriber,
        InfluentialFrame.name: InfluentialNodeBriber,
        MostInfluentialFrame.name: MostInfluentialNodeBriber,
        NonFrame.name: NonBriber,
        EvenFrame.name: OneMoveEvenBriber,
        BudgetFrame.name: BudgetNodeBriber,
        PGreedyFrame.name: PGreedyBriber
    }
    return switcher.get(strategy_type)(*args)


class TemporalGUI(tk.Toplevel):
    """
    Window for the temporal wizard and running environment.
    """

    def __init__(self, controller, *args, **kwargs):
        super().__init__(controller, *args, **kwargs)
        self.title("Temporal Model")
        self.controller = controller
        # application window
        container = tk.Frame(self)
        container.grid(row=0, column=0, sticky='nsew')
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        # frame for each displayed page
        self.frames = {}
        for F in FRAMES_CLASSES:
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(WizardFrame.__name__)
        self.bribers = []        # briber instances, in creation order
        self.bribers_spent = []  # total utility each briber has spent so far
        self.results = ResultsStore(X_AXIS_OPTIONS, Y_AXIS_OPTIONS)
        self.briber_names = []   # display labels, parallel to self.bribers
        self.g = None            # the ThresholdGraph being simulated

    def clear_graph(self):
        """Reset all run state so a new model can be configured."""
        self.bribers = []
        self.bribers_spent = []
        self.results = ResultsStore(X_AXIS_OPTIONS, Y_AXIS_OPTIONS)
        self.briber_names = []
        self.g = None

    def show_frame(self, page):
        """Raise the page named by its frame class name."""
        self.frames[page].tkraise()

    def add_briber(self, b, *args):
        """Add a briber of strategy b; args[0] is its starting utility u0."""
        self.bribers.append(switch_briber(b, *args))
        self.bribers_spent.append(0)
        self.briber_names.append(f"Briber{len(self.bribers)}: {b}: u0={args[0]}")

    def add_graph(self, gtype, args, params):
        """Build the ThresholdGraph from the generator choice, generator args
        and model parameters, then initialise the graph page.

        :raises RuntimeError: if no bribers have been added yet
        """
        if not self.bribers:
            raise RuntimeError("No Bribers added to graph")
        if gtype == BarabasiAlbert.name:
            gen = FlatWeightedGraphGenerator(GraphGeneratorAlgo.BARABASI_ALBERT, *args)
        elif gtype == Composite.name:
            gen = FlatWeightedGraphGenerator(GraphGeneratorAlgo.COMPOSITE, *args)
        else:
            gen = FlatWeightedGraphGenerator(GraphGeneratorAlgo.WATTS_STROGATZ, *args)
        self.g = ThresholdGraph(
            tuple(self.bribers),
            generator=gen,
            non_voter_proportion=params[0],
            threshold=params[1],
            d=params[2],
            q=params[3],
            pay=params[4],
            apathy=params[5],
            learning_rate=params[6],
            true_averages=params[7],
            true_std_devs=params[8]
        )
        self.frames[GraphFrame.__name__].set_pos(spring_layout(nk2nx(self.g.get_graph())))
        self.frames[GraphFrame.__name__].add_briber_dropdown()
        self.frames[GraphFrame.__name__].draw_basic_graph(self.g)

    def update_results(self):
        """Record the current state of every tracked series."""
        self.results.add("Average Rating", [self.g.average_rating(briber_id=b) for b in range(0, len(self.bribers))])
        self.results.add("Total Utility", [b.get_resources() for b in self.bribers])
        self.results.add("Average Trust", self.g.average_trust())
        self.results.add("Utility Spent", [self.bribers_spent[b] for b in range(0, len(self.bribers))])
        self.results.add("Time", self.g.get_time_step())

    def plot_results(self, x_label, y_label):
        """Plot the chosen series on the results page and show it."""
        self.frames[ResultsFrame.__name__].plot_results(self.results, x_label, y_label)
        self.show_frame(ResultsFrame.__name__)

    def next_step(self):
        """Advance the simulation one round and refresh results and display."""
        last_round_was_bribery = self.g.is_bribery_round()
        self.g.step()
        if last_round_was_bribery:
            # account for utility just spent, before recording results
            for bribers, bribe in self.g.get_last_bribery_actions()[-1].get_bribes().items():
                self.bribers_spent[bribers] += sum(bribe.values())
        self.update_results()
        # build the round description shown beside the graph
        if last_round_was_bribery:
            info = "BRIBES\n"
            for bribers, bribe in self.g.get_last_bribery_actions()[-1].get_bribes().items():
                for c, n in bribe.items():
                    info += f"Briber {bribers + 1}: {c} --> {n}\n"
        else:
            info = "CUSTOMERS\n"
            for c, a in self.g.get_last_customer_action().actions.items():
                if a[0] == ActionType.NONE:
                    info += f"Customer {c}: No Action\n"
                elif a[0] == ActionType.BRIBED:
                    info += f"Customer {c}: Bribed to {a[1]}\n"
                elif a[0] == ActionType.SELECT:
                    info += f"Customer {c}: Going to {a[1]}\n"
        self.frames[GraphFrame.__name__].draw_basic_graph(self.g)
        self.frames[GraphFrame.__name__].set_info(info)

    @override
    def destroy(self):
        # hand control back to the main menu when this window closes
        if self.controller is not None:
            self.controller.show_main()
        super().destroy()


if __name__ == '__main__':
    app = TemporalGUI(None)
    app.mainloop()
/src/BribeNet/gui/apps/temporal/result.py
import tkinter as tk

import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

from BribeNet.gui.apps.temporal.results_wizard.window import TemporalResultsWizardWindow


class ResultsFrame(tk.Frame):
    """
    Frame for plotting selected result series of the temporal model being run.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.parent = parent
        # embedded matplotlib figure for the results plot
        self.fig = plt.figure(figsize=(8, 8))
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self)
        self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        replot_button = tk.Button(self, text="Change Variables", command=self.replot)
        replot_button.pack()
        exit_button = tk.Button(self, text="Exit", command=self.exit)
        exit_button.pack()

    def plot_results(self, results, x_label, y_label):
        """Plot series y_label against series x_label from the results store.

        A series is either scalar per step or a list with one value per
        briber; per-briber series are drawn as one labelled line per briber.
        """
        self.ax.clear()
        xs = results.get(x_label)
        ys = results.get(y_label)
        if not isinstance(xs[0], list) and not isinstance(ys[0], list):
            self.ax.plot(xs, ys)
        else:
            # for each briber
            for b in range(0, len(self.controller.briber_names)):
                x_plot = [r[b] for r in xs] if isinstance(xs[0], list) else xs
                y_plot = [r[b] for r in ys] if isinstance(ys[0], list) else ys
                # BUG FIX: removed leftover debug print() calls here
                self.ax.plot(x_plot, y_plot, label=self.controller.briber_names[b])
            self.ax.legend()
        self.ax.set_xlabel(x_label)
        self.ax.set_ylabel(y_label)
        self.canvas.draw()

    def replot(self):
        """Reopen the axis-selection wizard to plot different variables."""
        results_wizard = TemporalResultsWizardWindow(self.controller, self.controller.results)
        results_wizard.lift()

    def exit(self):
        """Return to the graph page."""
        self.controller.show_frame("GraphFrame")
/src/BribeNet/gui/apps/temporal/results_wizard/frame.py
import tkinter as tk


class TemporalResultsWizardFrame(tk.Frame):
    """
    Frame for pop-up wizard for selecting results displayed.
    """

    def __init__(self, parent, results):
        super().__init__(parent)
        self.parent = parent
        self.x_string_var = tk.StringVar(self)
        self.y_string_var = tk.StringVar(self)
        self.title_text = tk.Label(self, text="Select Values", font=("Calibri", 16, "bold"), pady=20)
        self.title_text.grid(row=0, column=0, columnspan=2)
        self.x_text = tk.Label(self, text="X-axis", padx=20, pady=10)
        self.x_text.grid(row=1, column=0)
        self.y_text = tk.Label(self, text="Y-axis", padx=20, pady=10)
        self.y_text.grid(row=2, column=0)
        # x-axis dropdown, defaulting to the first available series
        x_choices = results.get_x_options()
        self.x_string_var.set(x_choices[0])
        self.drop_xs = tk.OptionMenu(self, self.x_string_var, *x_choices)
        self.drop_xs.grid(row=1, column=1, sticky='ew')
        # y-axis dropdown, defaulting to the first available series
        y_choices = results.get_y_options()
        self.y_string_var.set(y_choices[0])
        self.drop_ys = tk.OptionMenu(self, self.y_string_var, *y_choices)
        self.drop_ys.grid(row=2, column=1, sticky='ew')
        self.submit_button = tk.Button(self, text="Submit", command=self.submit)
        self.submit_button.grid(row=3, column=0, columnspan=2, pady=20, sticky='nsew')

    def submit(self):
        """Ask the controller to plot the chosen axes, then close the wizard."""
        chosen_x = self.x_string_var.get()
        chosen_y = self.y_string_var.get()
        self.parent.controller.plot_results(chosen_x, chosen_y)
        self.parent.destroy()
/src/BribeNet/gui/apps/temporal/results_wizard/results.py
class ResultsStore:
    """
    Store for per-run result series, identified by keys and separated into
    x-axis and y-axis options.
    """

    def __init__(self, xs, ys):
        self.xs = xs  # keys usable as the x-axis
        self.ys = ys  # keys usable as the y-axis
        # one (initially empty) series per known key
        self.data = {key: [] for key in xs + ys}

    def add(self, k, v):
        """Append value v to the series stored under key k."""
        self.data[k].append(v)

    def get(self, k):
        """Return the full series stored under key k."""
        return self.data[k]

    def get_x_options(self):
        """Return the keys usable as the x-axis."""
        return self.xs

    def get_y_options(self):
        """Return the keys usable as the y-axis."""
        return self.ys
/src/BribeNet/gui/apps/temporal/results_wizard/window.py
import tkinter as tk

from BribeNet.gui.apps.temporal.results_wizard.frame import TemporalResultsWizardFrame


class TemporalResultsWizardWindow(tk.Toplevel):
    """
    Window for pop-up wizard for selecting results displayed.
    """

    def __init__(self, controller, results):
        super().__init__(controller)
        self.title("Results Wizard")
        self.controller = controller
        # the wizard frame reads the available axis options from `results`
        self.frame = TemporalResultsWizardFrame(self, results)
        self.frame.pack(pady=10, padx=10)
/src/BribeNet/gui/apps/temporal/wizard/bribers.py
import tkinter as tk

from BribeNet.gui.apps.temporal.briber_wizard.window import TemporalBriberWizardWindow
from BribeNet.helpers.override import override


class TemporalBribers(tk.Frame):
    """Frame listing the bribers configured for a temporal model run."""

    def __init__(self, parent):
        super().__init__(parent)
        self.briber_wizard = None  # at most one wizard window open at a time
        self.bribers_list = []     # (strategy_name, *args) tuples, parallel to the listbox
        bribers_title_label = tk.Label(self, text="Bribers")
        bribers_title_label.grid(row=1, column=1, columnspan=2, pady=10)
        self.bribers_listbox = tk.Listbox(self)
        self.bribers_listbox.grid(row=2, column=1, rowspan=3)
        scrollbar = tk.Scrollbar(self, orient="vertical")
        scrollbar.config(command=self.bribers_listbox.yview)
        scrollbar.grid(row=2, column=2, rowspan=3, sticky="ns")
        self.bribers_listbox.config(yscrollcommand=scrollbar.set)
        self.add_briber_button = tk.Button(self, text="Add", command=self.open_briber_wizard)
        self.add_briber_button.grid(row=2, column=3, sticky='nsew')
        self.duplicate_briber_button = tk.Button(self, text="Duplicate", command=self.duplicate_selected_briber)
        self.duplicate_briber_button.grid(row=3, column=3, sticky='nsew')
        self.delete_briber_button = tk.Button(self, text="Delete", command=self.delete_selected_briber)
        self.delete_briber_button.grid(row=4, column=3, sticky='nsew')

    def open_briber_wizard(self):
        """Open the briber wizard, or raise it if one is already open."""
        if self.briber_wizard is not None:
            self.briber_wizard.lift()
        else:
            self.briber_wizard = TemporalBriberWizardWindow(self)

    def duplicate_selected_briber(self):
        """Copy the selected briber entry to the end of the list."""
        selection = self.bribers_listbox.curselection()
        if not selection:
            return
        chosen = self.bribers_list[selection[0]]
        self.bribers_list.append(chosen)
        self.bribers_listbox.insert(tk.END, chosen[0])

    def delete_selected_briber(self):
        """Remove the selected briber entry from the list and listbox."""
        selection = self.bribers_listbox.curselection()
        if not selection:
            return
        self.bribers_listbox.delete(selection[0])
        del self.bribers_list[selection[0]]

    def add_briber(self, strat_type, *args):
        """Record a briber configured by the wizard."""
        self.bribers_list.append((strat_type, *args))
        self.bribers_listbox.insert(tk.END, strat_type)

    def get_all_bribers(self):
        """Return all configured (strategy_name, *args) tuples."""
        return self.bribers_list

    @override
    def destroy(self):
        # close any open wizard window before destroying this frame
        if self.briber_wizard is not None:
            self.briber_wizard.destroy()
        super().destroy()
/src/BribeNet/gui/apps/temporal/wizard/generation.py
from BribeNet.gui.apps.static.wizard.generation import StaticGeneration


class TemporalGeneration(StaticGeneration):
    """Graph-generation selection frame for the temporal wizard.

    Behaviourally identical to StaticGeneration; subclassed only so the
    temporal wizard has its own frame type.
    """
/src/BribeNet/gui/apps/temporal/wizard/rating_method.py
import tkinter as tk

from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.median_p_rating import MedianPRating
from BribeNet.gui.apps.temporal.wizard.rating_methods.o_rating import ORating
from BribeNet.gui.apps.temporal.wizard.rating_methods.p_gamma_rating import PGammaRating
from BribeNet.gui.apps.temporal.wizard.rating_methods.p_rating import PRating
from BribeNet.gui.apps.temporal.wizard.rating_methods.weighted_median_p_rating import WeightedMedianPRating
from BribeNet.gui.apps.temporal.wizard.rating_methods.weighted_p_rating import WeightedPRating

METHOD_SUBFRAMES = (ORating, PRating, MedianPRating, PGammaRating, WeightedPRating, WeightedMedianPRating)
METHOD_DICT = {v: k for k, v in enumerate([a.name for a in METHOD_SUBFRAMES])}


class TemporalRatingMethod(tk.Frame):
    """
    Frame for selecting the rating method used by the temporal model.
    """

    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.method_type = tk.StringVar(self)
        # one parameter sub-frame per rating method, stacked in the same cell
        self.subframes = tuple(c(self) for c in METHOD_SUBFRAMES)
        self.options = tuple(f.get_name() for f in self.subframes)
        name_label = tk.Label(self, text="Rating Method")
        name_label.grid(row=0, column=0, pady=10)
        self.dropdown = tk.OptionMenu(self, self.method_type, *self.options)
        self.dropdown.grid(row=1, column=0, pady=10)
        self.method_type.set(self.options[0])
        for f in self.subframes:
            f.grid(row=2, column=0, sticky="nsew", pady=20)
        self.method_type.trace('w', self.switch_frame)
        # NOTE(review): the dropdown variable is set to options[0] (o_rating)
        # above, but the sub-frame raised here is index 1 (p_rating) — confirm
        # which default is intended; get_rating_method() follows the dropdown.
        self.show_subframe(1)  # (p-rating)

    def show_subframe(self, page_no):
        """Raise the rating-method sub-frame at index page_no."""
        frame = self.subframes[page_no]
        frame.tkraise()

    # noinspection PyUnusedLocal
    def switch_frame(self, *args):
        """Trace callback: raise the sub-frame for the selected method."""
        self.show_subframe(METHOD_DICT[self.method_type.get()])

    def get_rating_method(self) -> RatingMethod:
        """Return the RatingMethod enum value of the selected sub-frame."""
        return self.subframes[METHOD_DICT[self.method_type.get()]].enum_value

    def get_args(self):
        """Return the argument tuple of the selected sub-frame."""
        return self.subframes[METHOD_DICT[self.method_type.get()]].get_args()
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/median_p_rating.py
from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.rating_method_frame import RatingMethodFrame


class MedianPRating(RatingMethodFrame):
    """Wizard sub-frame for the median P-rating method (defines no parameters
    of its own)."""

    enum_value = RatingMethod.MEDIAN_P_RATING  # enum passed to the rating graph
    name = 'median_p_rating'  # display name in the wizard dropdown
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/o_rating.py
from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.rating_method_frame import RatingMethodFrame


class ORating(RatingMethodFrame):
    """Wizard sub-frame for the O-rating method (defines no parameters of its
    own)."""

    enum_value = RatingMethod.O_RATING  # enum passed to the rating graph
    name = 'o_rating'  # display name in the wizard dropdown
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/p_gamma_rating.py
import tkinter as tk

from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.rating_method_frame import RatingMethodFrame


class PGammaRating(RatingMethodFrame):
    """Wizard sub-frame for the P-gamma rating method; exposes the gamma parameter."""

    name = 'p_gamma_rating'
    enum_value = RatingMethod.P_GAMMA_RATING

    def __init__(self, parent):
        super().__init__(parent)
        gamma_var = tk.DoubleVar(self, value=0.05)
        self.params = {'gamma': gamma_var}
        self.descriptions = {
            'gamma': 'dampening factor that defines the effect of nodes based on their distance'
        }
        # No heading row: the enclosing rating-method frame already shows a label.
        self.grid_params(show_name=False)
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/p_rating.py
from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.rating_method_frame import RatingMethodFrame


class PRating(RatingMethodFrame):
    """Wizard sub-frame selecting the plain P-rating method (no extra parameters)."""

    name = 'p_rating'
    enum_value = RatingMethod.P_RATING
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/rating_method_frame.py
import abc
from typing import Optional

from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.classes.param_list_frame import ParamListFrame


class RatingMethodFrame(ParamListFrame, abc.ABC):
    """Abstract base for the wizard's rating-method parameter sub-frames.

    Concrete subclasses set ``enum_value`` (and inherit ``name`` handling from
    ParamListFrame) so the wizard can map a dropdown choice to a RatingMethod.
    """

    # RatingMethod enum member this frame represents; overridden by subclasses.
    enum_value: Optional[RatingMethod] = None

    def __init__(self, parent):
        super().__init__(parent)
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/weighted_median_p_rating.py
from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.rating_method_frame import RatingMethodFrame


class WeightedMedianPRating(RatingMethodFrame):
    """Wizard sub-frame selecting the weighted median P-rating method (no extra parameters)."""

    name = 'weighted_median_p_rating'
    enum_value = RatingMethod.WEIGHTED_MEDIAN_P_RATING
/src/BribeNet/gui/apps/temporal/wizard/rating_methods/weighted_p_rating.py
from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.rating_methods.rating_method_frame import RatingMethodFrame


class WeightedPRating(RatingMethodFrame):
    """Wizard sub-frame selecting the weighted P-rating method (no extra parameters)."""

    name = 'weighted_p_rating'
    enum_value = RatingMethod.WEIGHTED_P_RATING
/src/BribeNet/gui/apps/temporal/wizard/settings.py
import tkinter as tk

from BribeNet.gui.classes.param_list_frame import ParamListFrame


class TemporalSettings(ParamListFrame):
    """Wizard sub-frame exposing the temporal model's numeric parameters."""

    name = 'Model Parameters'

    def __init__(self, parent):
        super().__init__(parent)
        # Insertion order matters: get_args() returns values in this order.
        self.params = {
            'non_voter_proportion': tk.DoubleVar(self, value=0.2),
            'threshold': tk.DoubleVar(self, value=0.5),
            'd': tk.IntVar(self, value=2),
            'q': tk.DoubleVar(self, value=0.5),
            'pay': tk.DoubleVar(self, value=1.0),
            'apathy': tk.DoubleVar(self, value=0.0),
            'learning_rate': tk.DoubleVar(self, value=0.1),
        }
        self.descriptions = {
            'non_voter_proportion': 'the proportion of customers which start with no vote',
            'threshold': 'the minimum rating for a customer to consider visiting a bribing actor',
            'd': 'the period of non-bribery rounds (minimum 2)',
            'q': 'the vote value to use in place of non-votes in rating calculations',
            'pay': 'the amount of utility given to a bribing actor each time a customer chooses them',
            'apathy': 'the probability that a customer performs no action',
            'learning_rate': 'how quickly the edge weights are updated by trust'
        }
        self.grid_params()
/src/BribeNet/gui/apps/temporal/wizard/wizard.py
import tkinter as tk
import tkinter.messagebox

import numpy as np

from BribeNet.graph.ratingMethod import RatingMethod
from BribeNet.gui.apps.temporal.wizard.bribers import TemporalBribers
from BribeNet.gui.apps.temporal.wizard.generation import TemporalGeneration
from BribeNet.gui.apps.temporal.wizard.rating_method import TemporalRatingMethod
from BribeNet.gui.apps.temporal.wizard.settings import TemporalSettings
from BribeNet.helpers.bribeNetException import BribeNetException

SUBFRAME_CLASSES = (TemporalSettings, TemporalBribers, TemporalGeneration, TemporalRatingMethod)
# BUG FIX: the original used c.__class__.__name__, but c is already a class, so
# c.__class__ is the metaclass and every value was 'type'.  c.__name__ gives the
# actual class names, mirroring the keys used for self.subframes.
SUBFRAME_DICT = {i: c.__name__ for (i, c) in enumerate(SUBFRAME_CLASSES)}


class WizardFrame(tk.Frame):
    """
    Frame for the wizard to construct a temporal model run
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        self.controller = controller
        self.subframes = {}
        for c in SUBFRAME_CLASSES:
            page_name = c.__name__
            frame = c(self)
            self.subframes[page_name] = frame
        self.subframes[TemporalSettings.__name__].grid(row=0, column=0, padx=10, pady=10, sticky="nsew")
        self.subframes[TemporalBribers.__name__].grid(row=0, column=1, rowspan=2, padx=10, pady=10, sticky="nsew")
        self.subframes[TemporalGeneration.__name__].grid(row=1, column=0, rowspan=2, padx=10, pady=10, sticky="nsew")
        self.subframes[TemporalRatingMethod.__name__].grid(row=1, column=1, padx=10, pady=10, sticky="nsew")
        run_button = tk.Button(self, text="Run", command=self.on_button)
        run_button.grid(row=2, column=1, pady=20, sticky='nesw')

    def add_briber(self, b_type, u0):
        """Forward a briber specification (strategy type, initial utility) to the controller."""
        self.controller.add_briber(b_type, u0)

    def on_button(self):
        """Collect wizard inputs, build the model via the controller and show the graph.

        On a domain (BribeNetException) failure the error is shown to the user and
        the partially-built graph is discarded; unexpected errors are re-raised
        after cleanup.
        """
        graph_type = self.subframes[TemporalGeneration.__name__].get_graph_type()
        graph_args = self.subframes[TemporalGeneration.__name__].get_args()
        bribers = self.subframes[TemporalBribers.__name__].get_all_bribers()
        rating_method = self.subframes[TemporalRatingMethod.__name__].get_rating_method()
        rating_method_args = self.subframes[TemporalRatingMethod.__name__].get_args()
        if not bribers:
            tk.messagebox.showerror(message="Graph needs one or more bribers")
            return
        try:
            for briber in bribers:
                strat_type = briber[0]
                briber_args = briber[1:]
                # The last two entries of each briber tuple are its true average and
                # standard deviation, consumed below rather than by the briber itself.
                self.controller.add_briber(strat_type, *(briber_args[:-2]))
            true_averages = np.asarray([args[-2] for args in bribers])
            true_std_devs = np.asarray([args[-1] for args in bribers])
            params = self.subframes[TemporalSettings.__name__].get_args() + (true_averages, true_std_devs)
            self.controller.add_graph(graph_type, graph_args, params)
            self.controller.g.set_rating_method(rating_method)
            if rating_method == RatingMethod.P_GAMMA_RATING:
                self.controller.g.set_gamma(rating_method_args[0])
            self.controller.update_results()
        except Exception as e:
            if isinstance(e, BribeNetException):
                tk.messagebox.showerror(message=f"{e.__class__.__name__}: {str(e)}")
                self.controller.clear_graph()
                return
            self.controller.clear_graph()
            raise  # bare raise preserves the original traceback
        self.controller.show_frame("GraphFrame")
/src/BribeNet/gui/classes/param_list_frame.py
import abc
import os
import tkinter as tk

from PIL import ImageTk, Image

from BribeNet.gui.classes.tooltip import ToolTip


class ParamListFrame(tk.Frame, abc.ABC):
    """Base frame rendering one labelled entry (plus info-icon tooltip) per parameter.

    Subclasses populate ``params`` (name -> tkinter Variable) and
    ``descriptions`` (name -> tooltip text), then call ``grid_params``.
    """

    name = "ABC"

    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.params = {}        # param name -> tkinter Variable
        self.descriptions = {}  # param name -> tooltip text
        img_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'info.png')
        self.info_img = ImageTk.PhotoImage(Image.open(img_path))
        # Keep references so tooltips and canvas images are not garbage-collected.
        self.tooltips = []
        self.images = []

    def get_args(self):
        """Return the current parameter values, in params insertion order."""
        return tuple(variable.get() for variable in self.params.values())

    def get_name(self):
        """Return the frame's display name."""
        return self.name

    def grid_params(self, show_name=True):
        """Lay out one row per parameter: label, info icon with tooltip, entry."""
        offset = 1 if show_name else 0
        if show_name:
            heading = tk.Label(self, text=self.name)
            heading.grid(row=0, column=0, columnspan=3, pady=10)
        for row, (param_name, variable) in enumerate(self.params.items()):
            tk.Label(self, text=param_name).grid(row=row + offset, column=0)
            icon_holder = tk.Frame(self)
            icon_canvas = tk.Canvas(master=icon_holder, width=16, height=16)
            tip = ToolTip(icon_holder, self.descriptions[param_name])
            self.tooltips.append(tip)
            icon_holder.bind('<Enter>', tip.show_tip)
            icon_holder.bind('<Leave>', tip.hide_tip)
            self.images.append(icon_canvas.create_image(0, 0, anchor=tk.NW, image=self.info_img))
            entry = tk.Entry(self, textvariable=variable)
            icon_canvas.pack()
            icon_holder.grid(row=row + offset, column=1, padx=30)
            entry.grid(row=row + offset, column=2)
/src/BribeNet/gui/classes/tooltip.py
import tkinter as tk # noinspection PyUnusedLocal class ToolTip(object): """ Show a tooltip from https://stackoverflow.com/a/56749167/5539184 """ def __init__(self, widget, text): self.widget = widget self.tip_window = None self.id = None self.x = self.y = 0 self.text = text def show_tip(self, *args): if self.tip_window is not None or not self.text: return x, y, cx, cy = self.widget.bbox("insert") x = x + self.widget.winfo_rootx() + 57 y = y + cy + self.widget.winfo_rooty() + 27 self.tip_window = tw = tk.Toplevel(self.widget) tw.wm_overrideredirect(1) tw.wm_geometry("+%d+%d" % (x, y)) label = tk.Label(tw, text=self.text, wraplength=400, justify=tk.LEFT, background="#ffffe0", relief=tk.SOLID, borderwidth=1, font=("tahoma", "10", "normal")) label.pack(ipadx=1) def hide_tip(self, *args): if self.tip_window is not None: self.tip_window.destroy() self.tip_window = None
/src/BribeNet/gui/main.py
import tkinter as tk

from BribeNet.gui.apps.main import Main
from BribeNet.gui.apps.static.static import StaticGUI
from BribeNet.gui.apps.temporal.main import TemporalGUI
from BribeNet.helpers.override import override


class GUI(tk.Tk):
    """
    Main menu window for the GUI

    Self-withdraws when model wizard opened, and deiconifies when wizard closed
    """

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self.title("Bribery Networks")
        self.main_frame = Main(self)
        self.main_frame.grid(row=1, column=1)
        self.grid_rowconfigure(1, weight=1)
        self.grid_columnconfigure(1, weight=1)
        self.minsize(400, 400)
        self.static_gui = None
        self.temporal_gui = None

    def _no_child_open(self):
        # At most one model window may be open at a time.
        return (self.static_gui is None) and (self.temporal_gui is None)

    def show_static_gui(self):
        """Open the static-model window and hide the main menu."""
        if self._no_child_open():
            self.static_gui = StaticGUI(self)
            self.withdraw()

    def show_temporal_gui(self):
        """Open the temporal-model window and hide the main menu."""
        if self._no_child_open():
            self.temporal_gui = TemporalGUI(self)
            self.withdraw()

    def show_main(self):
        """Forget any child windows and re-show the main menu."""
        self.static_gui = None
        self.temporal_gui = None
        self.deiconify()

    @override
    def destroy(self):
        """Destroy any open child windows before destroying the root itself."""
        for child in (self.static_gui, self.temporal_gui):
            if child is not None:
                child.destroy()
        super().destroy()


if __name__ == "__main__":
    app = GUI()
    app.mainloop()
/src/BribeNet/helpers/bribeNetException.py
class BribeNetException(Exception):
    """Root of the BribeNet exception hierarchy; all domain errors derive from it."""
/src/BribeNet/prediction/parameterPrediction.py
# noinspection PyUnresolvedReferences
from networkit.centrality import LocalClusteringCoefficient
# noinspection PyUnresolvedReferences
from networkit.distance import APSP
from networkit.generators import WattsStrogatzGenerator
from numpy import logspace
from numpy import sum as np_sum

# Number of sample graphs generated per rewiring probability when estimating ratios.
TRIALS = 5
INFINITY = float("inf")


class ParameterPrediction(object):
    """Predicts Watts-Strogatz generation parameters for an existing networkit graph.

    The original file carried its documentation as free-floating string literals
    between definitions (no-op statements); they are now proper docstrings.
    """

    def __init__(self, graph):
        self.__g = graph

    def average_clustering(self, turbo=True):
        """Return the mean local clustering coefficient of the graph.

        If graphs get too large, turn off turbo mode (which requires more memory).
        """
        lcc = LocalClusteringCoefficient(self.__g, turbo)
        lcc.run()
        scores = lcc.scores()
        return sum(scores) / len(scores)

    def average_shortest_path_length(self):
        """Return the average shortest path length of the graph."""
        apsp = APSP(self.__g)
        apsp.run()
        n = self.__g.numberOfNodes()
        # np_sum needed as we are summing values in a matrix.
        # The matrix returned by getDistances is n*n, but we divide by n*(n-1)
        # since the central diagonal represents distances from a node to itself.
        distances = apsp.getDistances()
        return np_sum(distances) / (n * (n - 1))

    def predict_small_world(self):
        """Predict Watts-Strogatz parameters for the wrapped graph.

        Returns (n, k, p), where:
            n: the number of nodes
            k: the degree of nodes of the starting regular graph (that we rewire)
            p: the probability of rewiring
        """
        n = self.__g.numberOfNodes()
        k = sum([len(self.__g.neighbors(i)) for i in self.__g.iterNodes()]) // (2 * n)
        probs = logspace(-5, 0, 64, False, 10)
        (lvs, cvs, l0, c0) = self.generate_example_graphs(n, k, probs)
        lp = self.average_shortest_path_length()
        l_ratio = lp / l0
        cp = self.average_clustering()
        c_ratio = cp / c0
        # Estimate p from the L and C ratio curves independently, then average.
        prob_l = probs[self.closest_index(lvs, l_ratio)]
        prob_c = probs[self.closest_index(cvs, c_ratio)]
        p = (prob_l + prob_c) / 2
        return n, k, p

    @staticmethod
    def closest_index(values, target):
        """Return the index of the value closest to target (first index on ties).

        Replaces the original hand-rolled argmin loop; min() also keeps the first
        minimal element, so tie behaviour is unchanged.
        """
        return min(range(len(values)), key=lambda i: abs(values[i] - target))

    @staticmethod
    def generate_example_graphs(n, k, ps):
        """For each p in ps, sample WS graphs and compute L(p)/L(0) and C(p)/C(0).

        Returns (l_values, c_values, l0, c0).
        """
        generator0 = WattsStrogatzGenerator(n, k, 0)
        pred0 = ParameterPrediction(generator0.generate())
        l0 = pred0.average_shortest_path_length()
        c0 = pred0.average_clustering()
        result = ([], [], l0, c0)
        for p in ps:
            l_tot = 0
            c_tot = 0
            generator = WattsStrogatzGenerator(n, k, p)
            for _ in range(TRIALS):  # loop index was unused
                pred_i = ParameterPrediction(generator.generate())
                l_tot += pred_i.average_shortest_path_length()
                c_tot += pred_i.average_clustering()
            lp = l_tot / TRIALS
            cp = c_tot / TRIALS
            result[0].append(lp / l0)
            result[1].append(cp / c0)
        return result


def test_parameter_prediction():
    """Smoke test: predict parameters for a graph generated with known WS settings."""
    print("Testing small world prediction with obviously Watts-Strogatz Graph (50,6,0.1)")
    generator = WattsStrogatzGenerator(50, 6, 0.1)
    pred = ParameterPrediction(generator.generate())
    print(pred.predict_small_world())


if __name__ == '__main__':
    test_parameter_prediction()
/src/docker_main.py
from sys import exit

from BribeNet.gui.main import GUI

# Due to a bug where app.mainloop() will not exit on closing of the root Tk
# instance if a Toplevel was at any stage instantiated, we use sys.exit(0) to
# 'hard exit' such that the Docker container does not hang after closing.


def hard_exit(tk_app):
    """Destroy the Tk root, then terminate the interpreter immediately."""
    tk_app.destroy()
    exit(0)


if __name__ == "__main__":
    app = GUI()
    app.protocol("WM_DELETE_WINDOW", lambda: hard_exit(app))
    app.mainloop()
/test/BribeNet/bribery/static/briberTestCase.py
from abc import ABC, abstractmethod
from unittest import TestCase

from BribeNet.bribery.static.nonBriber import NonBriber
from BribeNet.graph.static.ratingGraph import StaticRatingGraph


class BriberTestCase(TestCase, ABC):
    """Shared fixture base for static-bribery tests."""

    @abstractmethod
    def setUp(self) -> None:
        # Default fixture; concrete subclasses override with their briber type.
        self.briber = NonBriber(1)
        self.rg = StaticRatingGraph(self.briber)

    def tearDown(self) -> None:
        del self.briber, self.rg

    def _p_rating_increase(self, g1, g2):
        """Assert that g2's overall rating is at least g1's (g2 evaluated first)."""
        self.assertGreaterEqual(g2.eval_graph(), g1.eval_graph())
/test/BribeNet/bribery/static/test_oneMoveInfluentialNodeBriber.py
from copy import deepcopy

from BribeNet.bribery.static.oneMoveInfluentialNodeBriber import OneMoveInfluentialNodeBriber
from BribeNet.graph.static.ratingGraph import StaticRatingGraph
from test.BribeNet.bribery.static.briberTestCase import BriberTestCase


class TestOneMoveInfluentialNodeBriber(BriberTestCase):
    """Tests for the one-move influential-node briber on a static graph."""

    def setUp(self) -> None:
        self.briber = OneMoveInfluentialNodeBriber(10)
        self.rg = StaticRatingGraph(self.briber)

    def test_next_bribe_increases_p_rating(self):
        # Snapshot the graph so the pre-bribe rating can be compared afterwards.
        graph_before = deepcopy(self.briber._g)
        self.briber.next_bribe()
        self._p_rating_increase(graph_before, self.briber._g)
/test/BribeNet/bribery/static/test_oneMoveRandomBriber.py
from copy import deepcopy

from BribeNet.bribery.static.oneMoveRandomBriber import OneMoveRandomBriber
from BribeNet.graph.static.ratingGraph import StaticRatingGraph
from test.BribeNet.bribery.static.briberTestCase import BriberTestCase


class TestOneMoveRandomBriber(BriberTestCase):
    """Tests for the one-move random briber on a static graph.

    BUG FIX: this class was previously (mis)named TestOneMoveInfluentialNodeBriber,
    a copy-paste from the influential-node test module; unittest discovery is
    class-name agnostic, so renaming it only removes the confusion.
    """

    def setUp(self) -> None:
        self.briber = OneMoveRandomBriber(10)
        self.rg = StaticRatingGraph(self.briber)

    def test_next_bribe_increases_p_rating(self):
        # Snapshot the graph so the pre-bribe rating can be compared afterwards.
        initial_g = deepcopy(self.briber._g)
        self.briber.next_bribe()
        self._p_rating_increase(initial_g, self.briber._g)
/test/BribeNet/bribery/static/test_randomBriber.py
from BribeNet.bribery.static.randomBriber import RandomBriber
from BribeNet.graph.static.ratingGraph import StaticRatingGraph
from test.BribeNet.bribery.static.briberTestCase import BriberTestCase


class TestRandomBriber(BriberTestCase):
    """Tests for the random briber on a static graph."""

    def setUp(self) -> None:
        self.briber = RandomBriber(10)
        self.rg = StaticRatingGraph(self.briber)

    def test_next_bribe_does_not_exceed_budget(self):
        self.briber.next_bribe()
        # Resources may reach zero but must never go negative.
        self.assertGreaterEqual(self.briber.get_resources(), 0)
/test/BribeNet/bribery/temporal/action/test_briberyAction.py
from unittest import TestCase
from unittest.mock import MagicMock

from BribeNet.bribery.temporal.action.briberyAction import BriberyActionTimeNotCorrectException, \
    BriberyActionExecutedMultipleTimesException
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph


class TestBriberyAction(TestCase):
    """Tests for the temporal bribery action's time and single-execution guards.

    IMPROVEMENT: the original used try/except/return followed by a bare
    self.fail(), which reports no reason and silently passes if an unrelated
    exception escapes early; assertRaises pins the exact expected exception.
    """

    def setUp(self) -> None:
        self.briber = NonBriber(1)
        self.graph = NoCustomerActionGraph(self.briber)
        self.action = SingleBriberyAction(self.briber)

    def test_perform_action_fails_if_at_different_times(self):
        # Move the graph clock one step ahead of the action's timestamp.
        self.graph.get_time_step = MagicMock(return_value=self.action.get_time_step() + 1)
        self.assertRaises(BriberyActionTimeNotCorrectException, self.action.perform_action)

    def test_perform_action_fails_if_already_executed(self):
        self.action.add_bribe(0, 0.01)
        self.action.perform_action()
        # A second execution of the same action must be rejected.
        self.assertRaises(BriberyActionExecutedMultipleTimesException, self.action.perform_action)
/test/BribeNet/bribery/temporal/action/test_multiBriberyAction.py
from unittest import TestCase

from BribeNet.bribery.temporal.action.multiBriberyAction import MultiBriberyAction, \
    BriberyActionsAtDifferentTimesException, BriberyActionsOnDifferentGraphsException, \
    NoActionsToFormMultiActionException
from BribeNet.bribery.temporal.action import *
from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from unittest.mock import MagicMock


# noinspection PyBroadException
class TestMultiBriberyAction(TestCase):
    """Tests for MultiBriberyAction validation, execution and construction
    from per-briber SingleBriberyActions.

    Bribe dicts map briber id -> {node id -> amount}.
    """

    def setUp(self) -> None:
        # Four bribers, each with 1 unit of utility — presumably the budget; see NonBriber.
        self.bribers = (NonBriber(1), NonBriber(1), NonBriber(1), NonBriber(1))
        # Bribers 0, 2 and 3 each bribe node 0 with 0.5 (within each budget of 1).
        self.valid_action_dict = {0: {0: 0.5}, 2: {0: 0.5}, 3: {0: 0.5}}
        self.graph = NoCustomerActionGraph(self.bribers)

    def tearDown(self) -> None:
        del self.bribers, self.graph

    def test_add_bribe_fails_if_bribe_not_greater_than_zero(self):
        action = MultiBriberyAction(self.graph)
        self.assertRaises(BribeMustBeGreaterThanZeroException, action.add_bribe, 0, 0, -1.0)

    def test_add_bribe_fails_if_node_id_not_present(self):
        action = MultiBriberyAction(self.graph)
        self.assertRaises(NodeDoesNotExistException, action.add_bribe, 0, -1, 1.0)

    def test_add_bribe_fails_if_briber_id_not_present_1(self):
        # Briber id below the valid range.
        action = MultiBriberyAction(self.graph)
        self.assertRaises(BriberDoesNotExistException, action.add_bribe, -1, 0, 1.0)

    def test_add_bribe_fails_if_briber_id_not_present_2(self):
        # Briber id just past the last index (only 4 bribers: ids 0..3).
        action = MultiBriberyAction(self.graph)
        self.assertRaises(BriberDoesNotExistException, action.add_bribe, 4, 0, 1.0)

    def test_add_bribe_passes_1(self):
        action = MultiBriberyAction(self.graph)
        action.add_bribe(0, 0, 1.0)
        self.assertEqual(action._bribes[0][0], 1.0)

    def test_add_bribe_passes_2(self):
        # Repeated bribes from the same briber to the same node accumulate.
        action = MultiBriberyAction(self.graph, bribes={0: {0: 1.0}})
        action.add_bribe(0, 0, 1.0)
        self.assertEqual(action._bribes[0][0], 2.0)

    def test_perform_action_fails_when_bribes_exceed_budget(self):
        # 10.0 exceeds briber 0's available utility.
        action = MultiBriberyAction(self.graph, bribes={0: {0: 10.0}})
        self.assertRaises(BriberyActionExceedsAvailableUtilityException, action.perform_action)

    def test_perform_action(self):
        action = MultiBriberyAction(self.graph, bribes=self.valid_action_dict)
        action.perform_action()
        self.assertTrue(action.get_performed())

    def test_make_multi_action_from_single_actions_fails_if_on_different_graphs(self):
        other_briber = NonBriber(1)
        # noinspection PyUnusedLocal
        other_graph = NoCustomerActionGraph(other_briber)  # binds other_briber to a different graph
        action0 = SingleBriberyAction(other_briber)
        action1 = SingleBriberyAction(self.bribers[0])
        self.assertRaises(BriberyActionsOnDifferentGraphsException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [action0, action1])

    def test_make_multi_action_from_single_actions_fails_if_no_actions(self):
        self.assertRaises(NoActionsToFormMultiActionException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [])

    def test_make_multi_action_from_single_actions_fails_if_bribe_not_greater_than_zero(self):
        action = SingleBriberyAction(self.bribers[0])
        # Inject an invalid bribe directly to bypass add_bribe's own validation.
        action._bribes[0] = -1.0
        self.assertRaises(BribeMustBeGreaterThanZeroException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [action])

    def test_make_multi_action_from_single_actions_fails_if_at_different_times(self):
        action0 = SingleBriberyAction(self.bribers[0])
        action1 = SingleBriberyAction(self.bribers[1])
        # Shift action0's reported timestamp one step ahead of action1's.
        action0.get_time_step = MagicMock(return_value=action0.get_time_step()+1)
        self.assertRaises(BriberyActionsAtDifferentTimesException,
                          MultiBriberyAction.make_multi_action_from_single_actions, [action0, action1])

    def test_make_multi_action_from_single_actions(self):
        # Combining per-briber actions must reproduce the full bribe dict.
        single_actions = [SingleBriberyAction(self.bribers[i], self.valid_action_dict[i])
                          for i in self.valid_action_dict.keys()]
        multi_action = MultiBriberyAction.make_multi_action_from_single_actions(single_actions)
        self.assertEqual(multi_action._bribes, self.valid_action_dict)
/test/BribeNet/bribery/temporal/action/test_singleBriberyAction.py
from unittest import TestCase

from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from BribeNet.bribery.temporal.action import *


class TestSingleBriberyAction(TestCase):
    """Tests for SingleBriberyAction bribe validation and execution.

    Bribe dicts map node id -> amount for one briber.
    """

    def setUp(self) -> None:
        self.briber = NonBriber(1)  # 1 presumably the briber's utility budget — see NonBriber
        self.graph = NoCustomerActionGraph(self.briber)

    def test_add_bribe_fails_if_bribe_not_greater_than_zero(self):
        action = SingleBriberyAction(self.briber)
        self.assertRaises(BribeMustBeGreaterThanZeroException, action.add_bribe, 0, -1.0)

    def test_add_bribe_fails_if_node_id_not_present(self):
        action = SingleBriberyAction(self.briber)
        self.assertRaises(NodeDoesNotExistException, action.add_bribe, -1, 1.0)

    def test_add_bribe_passes_1(self):
        action = SingleBriberyAction(self.briber)
        action.add_bribe(0, 1.0)
        self.assertEqual(action._bribes[0], 1.0)

    def test_add_bribe_passes_2(self):
        # Bribes to the same node accumulate rather than overwrite.
        action = SingleBriberyAction(self.briber, bribes={0: 1.0})
        action.add_bribe(0, 1.0)
        self.assertEqual(action._bribes[0], 2.0)

    def test__perform_action_fails_when_bribes_exceed_budget(self):
        # 10.0 exceeds the briber's available utility.
        action = SingleBriberyAction(self.briber, bribes={1: 10.0})
        self.assertRaises(BriberyActionExceedsAvailableUtilityException, action._perform_action)

    def test_perform_action(self):
        action = SingleBriberyAction(self.briber, bribes={0: 0.5})
        action.perform_action()
        self.assertTrue(action.get_performed())
/test/BribeNet/bribery/temporal/briberTestCase.py
from abc import ABC, abstractmethod
from unittest import TestCase

from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph


class BriberTestCase(TestCase, ABC):
    """Shared fixture base for temporal-bribery tests."""

    @abstractmethod
    def setUp(self) -> None:
        # Default fixture; concrete subclasses override with their briber type.
        self.briber = NonBriber(0)
        self.rg = NoCustomerActionGraph(self.briber)

    def tearDown(self) -> None:
        # Drop fixture references so each test starts from a fresh graph/briber.
        del self.briber, self.rg
/test/BribeNet/bribery/temporal/test_budgetBriber.py
from BribeNet.bribery.temporal.budgetNodeBriber import BudgetNodeBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from test.BribeNet.bribery.temporal.briberTestCase import BriberTestCase
from unittest.mock import MagicMock


class TestBudgetBriber(BriberTestCase):
    """Tests for BudgetNodeBriber, driven by mocking the graph's eval/vote methods
    and poking the briber's private cursor state (_previous_rating, _next_node)."""

    def setUp(self) -> None:
        self.briber = BudgetNodeBriber(10, b=0.5)  # utility 10, per-bribe budget b=0.5
        self.rg = NoCustomerActionGraph(self.briber)

    def test_next_action_increases_p_rating(self):
        graph = self.briber._g
        action = self.briber.next_action()
        briber_id = self.briber.get_briber_id()
        prev_eval = graph.eval_graph(briber_id=briber_id)
        action.perform_action()
        # Performing the proposed action must not lower the briber's rating.
        self.assertGreaterEqual(graph.eval_graph(briber_id=briber_id), prev_eval)

    def test_next_action_bribes_if_suitable(self):
        graph = self.briber._g
        # Rating rose from 0 to 1 and the node's vote (0.5) fits the budget,
        # so node 0 should be bribed up by 0.5.
        self.briber._previous_rating = 0
        graph.eval_graph = MagicMock(return_value=1)
        graph.get_vote = MagicMock(return_value=[0.5])
        self.briber._next_node = 0
        action = self.briber.next_action()
        self.assertDictEqual(action._bribes, {0: 0.5})

    def test_next_action_moves_on_if_not_influential(self):
        graph = self.briber._g
        self.briber._previous_rating = 1
        graph.eval_graph = MagicMock(return_value=1)  # will never be influential
        graph.get_vote = MagicMock(return_value=[1.0])  # will always be affordable
        prev_nodes = []
        for i in range(graph.customer_count()):
            action = self.briber.next_action()
            # Once rejected, a node must not be revisited in later actions.
            for prev_node in prev_nodes:
                self.assertNotIn(prev_node, action._bribes)
            prev_nodes.append(self.briber._next_node)

    def test_next_action_moves_on_if_not_in_budget(self):
        graph = self.briber._g
        graph.eval_graph = MagicMock(return_value=1)
        graph.get_vote = MagicMock(return_value=[0.0])  # will always be not in budget
        prev_nodes = []
        for i in range(graph.customer_count()):
            self.briber._previous_rating = 0  # will always be influential
            action = self.briber.next_action()
            for prev_node in prev_nodes:
                self.assertNotIn(prev_node, action._bribes)
            prev_nodes.append(self.briber._next_node)

    def test_next_action_does_not_fail_if_no_nodes_influential(self):
        graph = self.briber._g
        self.briber._previous_rating = 1
        graph.eval_graph = MagicMock(return_value=1)  # will never be influential
        graph.get_vote = MagicMock(return_value=[1.0])  # will always be affordable
        prev_nodes = []
        # One more iteration than there are customers: the briber must wrap/skip
        # gracefully rather than raise once every node has been rejected.
        for i in range(graph.customer_count() + 1):
            action = self.briber.next_action()
            for prev_node in prev_nodes:
                self.assertNotIn(prev_node, action._bribes)
            prev_nodes.append(self.briber._next_node)
/test/BribeNet/bribery/temporal/test_mostInfluentialBriber.py
from BribeNet.bribery.temporal.mostInfluentialNodeBriber import MostInfluentialNodeBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from test.BribeNet.bribery.temporal.briberTestCase import BriberTestCase
from unittest.mock import MagicMock

# Information-gathering window length passed to the briber as i.
TEST_I = 7


class TestMostInfluentialBriber(BriberTestCase):
    """Tests for MostInfluentialNodeBriber, driven by mocking graph methods and
    poking the briber's private search state (_c, _best_node, _max_rating_increase)."""

    def setUp(self) -> None:
        self.briber = MostInfluentialNodeBriber(10, i=TEST_I)
        self.rg = NoCustomerActionGraph(self.briber)

    def test_next_action_increases_p_rating(self):
        graph = self.briber._g
        action = self.briber.next_action()
        briber_id = self.briber.get_briber_id()
        prev_eval = graph.eval_graph(briber_id=briber_id)
        action.perform_action()
        # Performing the proposed action must not lower the briber's rating.
        self.assertGreaterEqual(graph.eval_graph(briber_id=briber_id), prev_eval)

    def test_next_action_gains_information_for_suitable_time(self):
        # During the first i-1 steps the briber probes one fresh node per action.
        prev_nodes = []
        for i in range(TEST_I - 1):
            action = self.briber.next_action()
            self.assertEqual(len(action._bribes), 1)
            for prev_node in prev_nodes:
                self.assertNotIn(prev_node, action._bribes)
            prev_nodes.append(self.briber._next_node)

    def test_next_action_performs_bribe_on_best_node(self):
        # With the probe counter exhausted (_c == _i), the briber must bribe the
        # recorded best node and reset its search state.
        self.briber._c = self.briber._i
        self.briber._best_node = 1
        graph = self.briber._g
        graph.eval_graph = MagicMock(return_value=0)
        action = self.briber.next_action()
        self.assertIn(1, action._bribes)
        self.assertEqual(self.briber._c, 0)
        self.assertEqual(self.briber._max_rating_increase, 0)

    def test_next_action_finds_best_node(self):
        graph = self.briber._g
        # Rating jumps from 1 to 10 for customer 3 => increase of 9 becomes the new max.
        graph.eval_graph = MagicMock(return_value=10)
        graph.get_random_customer = MagicMock(return_value=3)
        self.briber._previous_rating = 1
        self.briber._max_rating_increase = 0
        action = self.briber.next_action()
        self.assertIn(3, action._bribes)
        self.assertEqual(self.briber._max_rating_increase, 9)

    def test_next_action_does_not_fail_if_no_nodes_influential_within_i_step(self):
        graph = self.briber._g
        self.briber._previous_rating = 1
        graph.eval_graph = MagicMock(return_value=1)  # will never be influential
        prev_nodes = []
        # One step past the probe window: the briber must cope with an empty result.
        for i in range(TEST_I + 1):
            action = self.briber.next_action()
            for prev_node in prev_nodes:
                self.assertNotIn(prev_node, action._bribes)
            prev_nodes.append(self.briber._next_node)

    def test_next_action_does_not_fail_if_no_nodes_influential_at_all(self):
        graph = self.briber._g
        self.briber._previous_rating = 1
        graph.eval_graph = MagicMock(return_value=1)  # will never be influential
        prev_nodes = []
        # One more iteration than there are customers: must not raise after
        # every node has been rejected.
        for i in range(graph.customer_count() + 1):
            action = self.briber.next_action()
            for prev_node in prev_nodes:
                self.assertNotIn(prev_node, action._bribes)
            prev_nodes.append(self.briber._next_node)
/test/BribeNet/bribery/temporal/test_oneMoveRandomBriber.py
from BribeNet.bribery.temporal.oneMoveRandomBriber import OneMoveRandomBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from test.BribeNet.bribery.temporal.briberTestCase import BriberTestCase


class TestOneMoveRandomBriber(BriberTestCase):
    """Tests for the one-move random briber on a temporal graph."""

    def setUp(self) -> None:
        self.briber = OneMoveRandomBriber(10)
        self.rg = NoCustomerActionGraph(self.briber)

    def test_next_action_increases_p_rating(self):
        g = self.briber._g
        proposed = self.briber.next_action()
        b_id = self.briber.get_briber_id()
        rating_before = g.eval_graph(briber_id=b_id)
        proposed.perform_action()
        # The proposed action must not lower the briber's rating.
        self.assertGreaterEqual(g.eval_graph(briber_id=b_id), rating_before)
/test/BribeNet/bribery/temporal/test_randomBriber.py
from BribeNet.bribery.temporal.randomBriber import RandomBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from test.BribeNet.bribery.temporal.briberTestCase import BriberTestCase


class TestRandomBriber(BriberTestCase):
    """Tests for the random briber on a temporal graph."""

    def setUp(self) -> None:
        self.briber = RandomBriber(10)
        self.rg = NoCustomerActionGraph(self.briber)

    def test_next_action_increases_p_rating(self):
        g = self.briber._g
        proposed = self.briber.next_action()
        b_id = self.briber.get_briber_id()
        rating_before = g.eval_graph(briber_id=b_id)
        proposed.perform_action()
        # The proposed action must not lower the briber's rating.
        self.assertGreaterEqual(g.eval_graph(briber_id=b_id), rating_before)
/test/BribeNet/bribery/test_briber.py
import random

from test.BribeNet.bribery.static.briberTestCase import BriberTestCase
from BribeNet.bribery.briber import BriberyGraphAlreadySetException, BriberyGraphNotSetException
from BribeNet.bribery.static.nonBriber import NonBriber


class TestBriber(BriberTestCase):
    """Tests for the common briber base-class behaviour."""

    def setUp(self) -> None:
        super().setUp()

    def test_bribe(self):
        # Bribing must debit exactly the bribed amount from the briber's resources.
        starting_resources = self.briber.get_resources()
        amount = random.randrange(0, starting_resources)
        self.briber.bribe(0, amount)
        self.assertEqual(self.briber.get_resources(), starting_resources - amount)

    def test_next_bribe_fails_if_graph_not_set(self):
        graphless_briber = NonBriber(0)
        self.assertRaises(BriberyGraphNotSetException, graphless_briber.next_bribe)

    def test_set_graph_fails_if_graph_already_set(self):
        self.assertRaises(BriberyGraphAlreadySetException, self.briber._set_graph, self.rg)
/test/BribeNet/graph/generation/test_unweightedGenerator.py
from unittest import TestCase

from BribeNet.graph.generation.unweightedGenerator import UnweightedGraphGenerator
from BribeNet.graph.generation import GraphGeneratorAlgo


class TestUnweightedGraphGenerator(TestCase):
    """Every algorithm of UnweightedGraphGenerator must emit an unweighted graph."""

    def test_generate_ws(self):
        generator = UnweightedGraphGenerator(GraphGeneratorAlgo.WATTS_STROGATZ, 30, 5, 0.3)
        self.assertFalse(generator.generate().isWeighted())

    def test_generate_ba(self):
        generator = UnweightedGraphGenerator(GraphGeneratorAlgo.BARABASI_ALBERT, 5, 30, 0, True)
        self.assertFalse(generator.generate().isWeighted())

    def test_generate_composite(self):
        generator = UnweightedGraphGenerator(GraphGeneratorAlgo.COMPOSITE, 30, 15, 50, 0.1, 2)
        self.assertFalse(generator.generate().isWeighted())
/test/BribeNet/graph/generation/test_weightedGenerator.py
from unittest import TestCase

from BribeNet.graph.generation.flatWeightGenerator import FlatWeightedGraphGenerator
from BribeNet.graph.generation import GraphGeneratorAlgo


class TestFlatWeightedGraphGenerator(TestCase):
    """Every algorithm of FlatWeightedGraphGenerator must emit a weighted graph."""

    def test_generate_ws(self):
        generator = FlatWeightedGraphGenerator(GraphGeneratorAlgo.WATTS_STROGATZ, 30, 5, 0.3)
        self.assertTrue(generator.generate().isWeighted())

    def test_generate_ba(self):
        generator = FlatWeightedGraphGenerator(GraphGeneratorAlgo.BARABASI_ALBERT, 5, 30, 0, True)
        self.assertTrue(generator.generate().isWeighted())

    def test_generate_composite(self):
        generator = FlatWeightedGraphGenerator(GraphGeneratorAlgo.COMPOSITE, 30, 15, 50, 0.1, 2)
        self.assertTrue(generator.generate().isWeighted())
/test/BribeNet/graph/static/test_multiBriberRatingGraph.py
from copy import deepcopy
from unittest import TestCase

from BribeNet.bribery.static.nonBriber import NonBriber
from BribeNet.bribery.static.randomBriber import RandomBriber
from BribeNet.graph.static.ratingGraph import StaticRatingGraph


class TestMultiBriberRatingGraph(TestCase):
    """Rating-graph invariants in the presence of multiple static bribers."""

    def setUp(self) -> None:
        # noinspection PyTypeChecker
        self.rg = StaticRatingGraph((RandomBriber(10), NonBriber(10)))

    def tearDown(self) -> None:
        del self.rg

    def _briber_indices(self):
        # Every valid briber index for the graph under test.
        return range(len(self.rg.get_bribers()))

    def test_neighbours(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertIsInstance(self.rg._neighbours(node, briber_idx), list)

    def test_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_rating(node, briber_idx) >= 0)

    def test_median_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._median_p_rating(node, briber_idx) >= 0)

    def test_sample_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._sample_p_rating(node, briber_idx) >= 0)

    def test_p_gamma_rating(self):
        # NOTE(review): unlike the other rating helpers, _p_gamma_rating is
        # called without the briber index — presumably it defaults internally;
        # confirm against StaticRatingGraph's signature.
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)
                self.assertAlmostEqual(self.rg._p_gamma_rating(node, gamma=0),
                                       self.rg._p_rating(node))

    def test_weighted_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)

    def test_weighted_median_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)

    def test_o_rating(self):
        for briber_idx in self._briber_indices():
            rating = self.rg._o_rating(briber_idx)
            self.assertTrue(rating >= 0)

    def test_is_influential(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertGreaterEqual(
                    self.rg.is_influential(node, 0.2, briber_idx, charge_briber=False), 0)

    def test_bribe(self):
        # Any single bribe should perturb that briber's graph evaluation.
        for briber_idx in self._briber_indices():
            initial_value = self.rg.eval_graph(briber_idx)
            for node in self.rg.get_customers():
                graph_copy = deepcopy(self.rg)
                graph_copy.bribe(node, 0.1, briber_idx)
                bribed_value = graph_copy.eval_graph(briber_idx)
                self.assertTrue(initial_value != bribed_value)

    def test_eval_graph(self):
        for briber_idx in self._briber_indices():
            self.assertGreaterEqual(self.rg.eval_graph(briber_idx), 0)

    def test_trust(self):
        # Trust must be symmetric and lie within [0, 1].
        for u in self.rg.get_customers():
            for v in self.rg.get_customers():
                forward = self.rg.trust(u, v)
                backward = self.rg.trust(v, u)
                self.assertEqual(forward, backward)
                self.assertGreaterEqual(forward, 0)
                self.assertLessEqual(forward, 1)
/test/BribeNet/graph/static/test_ratingGraphBuilder.py
from unittest import TestCase

from BribeNet.bribery.static.influentialNodeBriber import InfluentialNodeBriber
from BribeNet.bribery.static.mostInfluentialNodeBriber import MostInfluentialNodeBriber
from BribeNet.bribery.static.nonBriber import NonBriber
from BribeNet.bribery.static.oneMoveInfluentialNodeBriber import OneMoveInfluentialNodeBriber
from BribeNet.bribery.static.oneMoveRandomBriber import OneMoveRandomBriber
from BribeNet.bribery.static.randomBriber import RandomBriber
from BribeNet.graph.static.ratingGraphBuilder import RatingGraphBuilder, BriberType


class TestRatingGraphBuilder(TestCase):
    """Tests for the fluent static rating-graph builder."""

    def setUp(self) -> None:
        self.builder = RatingGraphBuilder()

    def tearDown(self) -> None:
        del self.builder

    def test_add_briber(self):
        # BriberType members are declared in the same order as these classes.
        expected_classes = [NonBriber, RandomBriber, OneMoveRandomBriber,
                            InfluentialNodeBriber, MostInfluentialNodeBriber,
                            OneMoveInfluentialNodeBriber]
        for member_name, briber_cls in zip(BriberType._member_names_, expected_classes):
            self.builder.add_briber(getattr(BriberType, member_name), u0=10)
            self.assertIsInstance(self.builder.bribers[-1], briber_cls)

    def test_build_no_bribers(self):
        # Building with no bribers registered falls back to a single NonBriber.
        graph = self.builder.build()
        self.assertIsInstance(graph.get_bribers()[0], NonBriber)

    def test_build_one_briber(self):
        self.builder.add_briber(BriberType.Random)
        graph = self.builder.build()
        self.assertIsInstance(graph.get_bribers()[0], RandomBriber)

    def test_build_multiple_bribers(self):
        # add_briber returns the builder, so calls chain fluently.
        self.builder.add_briber(BriberType.Random).add_briber(BriberType.InfluentialNode)
        bribers = self.builder.build().get_bribers()
        self.assertEqual(len(bribers), 2)
        self.assertIsInstance(bribers[0], RandomBriber)
        self.assertIsInstance(bribers[1], InfluentialNodeBriber)
/test/BribeNet/graph/static/test_singleBriberRatingGraph.py
from copy import deepcopy
from unittest import TestCase

from BribeNet.bribery.static.nonBriber import NonBriber
from BribeNet.graph.static.ratingGraph import StaticRatingGraph


class TestSingleBriberRatingGraph(TestCase):
    """Rating-graph invariants with a single (non-acting) static briber."""

    def setUp(self) -> None:
        self.rg = StaticRatingGraph(NonBriber(0))

    def tearDown(self) -> None:
        del self.rg

    def test_neighbors(self):
        for node in self.rg.get_customers():
            self.assertIsInstance(self.rg._neighbours(node), list)

    def test_p_rating(self):
        for node in self.rg.get_customers():
            self.assertTrue(self.rg._p_rating(node) >= 0)

    def test_median_p_rating(self):
        for node in self.rg.get_customers():
            self.assertTrue(self.rg._median_p_rating(node) >= 0)

    def test_sample_p_rating(self):
        for node in self.rg.get_customers():
            self.assertTrue(self.rg._sample_p_rating(node) >= 0)

    def test_weighted_p_rating(self):
        for _ in range(len(self.rg.get_bribers())):
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)

    def test_weighted_median_p_rating(self):
        for _ in range(len(self.rg.get_bribers())):
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)

    def test_p_gamma_rating(self):
        # With gamma=0 the gamma-weighted rating must collapse to the plain P-rating.
        for _ in range(len(self.rg.get_bribers())):
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)
                self.assertAlmostEqual(self.rg._p_gamma_rating(node, gamma=0),
                                       self.rg._p_rating(node))

    def test_o_rating(self):
        self.assertTrue(self.rg._o_rating() >= 0)

    def test_bribe(self):
        # Any single bribe should perturb the overall graph evaluation.
        initial_value = self.rg.eval_graph()
        for node in self.rg.get_customers():
            graph_copy = deepcopy(self.rg)
            graph_copy.bribe(node, 0.1)
            bribed_value = graph_copy.eval_graph()
            self.assertTrue(initial_value != bribed_value)
/test/BribeNet/graph/temporal/action/test_customerAction.py
from random import sample, randint, shuffle
from unittest import TestCase
from unittest.mock import MagicMock

from BribeNet.bribery.temporal.action.singleBriberyAction import SingleBriberyAction
from BribeNet.graph.temporal.action.customerAction import CustomerAction, \
    CustomerActionExecutedMultipleTimesException, CustomerActionTimeNotCorrectException
from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph
from BribeNet.graph.temporal.action.actionType import ActionType


class TestCustomerAction(TestCase):
    """Tests for CustomerAction construction and its execution guards."""

    def setUp(self) -> None:
        self.briber = NonBriber(0)
        self.rg = NoCustomerActionGraph(self.briber)

    def test_set_bribed_from_bribery_action(self):
        # Nodes bribed in the bribery action — and only those — must end up
        # marked BRIBED in the derived customer action.
        nodes = self.rg.get_customers()
        for _ in range(10):
            customer_action = CustomerAction(self.rg)
            bribery_action = SingleBriberyAction(self.briber)
            bribed_nodes = sample(nodes, randint(1, len(nodes)))
            for bribed_node in bribed_nodes:
                bribery_action.add_bribe(bribed_node, 1.0)
            customer_action.set_bribed_from_bribery_action(bribery_action)
            marked = [node for node, action in customer_action.actions.items()
                      if action[0] == ActionType.BRIBED]
            self.assertEqual(set(marked), set(bribed_nodes))
            unmarked = [node for node, action in customer_action.actions.items()
                        if action[0] != ActionType.BRIBED]
            self.assertEqual(set(unmarked) & set(bribed_nodes), set())

    @staticmethod
    def __partition(list_in, n):
        # Shuffle in place, then deal the list into n round-robin slices.
        shuffle(list_in)
        return [list_in[i::n] for i in range(n)]

    def test_perform_action_runs_normally(self):
        nodes = self.rg.get_customers()
        for _ in range(10):
            customer_action = CustomerAction(self.rg)
            partition = TestCustomerAction.__partition(nodes, 3)
            for node in partition[0]:
                customer_action.set_bribed(node, [0])
            for node in partition[1]:
                customer_action.set_select(node, 0)
            customer_action.perform_action(0)
            self.assertTrue(customer_action.get_performed())

    def test_perform_action_fails_when_time_incorrect(self):
        customer_action = CustomerAction(self.rg)
        # Skew the graph clock so the action's timestamp no longer matches.
        self.rg.get_time_step = MagicMock(return_value=self.rg.get_time_step() + 1)
        self.assertRaises(CustomerActionTimeNotCorrectException,
                          customer_action.perform_action, 0)

    def test_perform_action_fails_when_executed_twice(self):
        customer_action = CustomerAction(self.rg)
        customer_action.perform_action(0)
        self.assertRaises(CustomerActionExecutedMultipleTimesException,
                          customer_action.perform_action, 0)
/test/BribeNet/graph/temporal/test_multiBriberRatingGraph.py
from copy import deepcopy
from unittest import TestCase

from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.bribery.temporal.randomBriber import RandomBriber
from BribeNet.graph.temporal.noCustomerActionGraph import NoCustomerActionGraph


class TestMultiBriberRatingGraph(TestCase):
    """Rating-graph invariants for a temporal graph with several bribers."""

    def setUp(self) -> None:
        self.rg = NoCustomerActionGraph((RandomBriber(10), NonBriber(10)))

    def tearDown(self) -> None:
        del self.rg

    def _briber_indices(self):
        # Every valid briber index for the graph under test.
        return range(len(self.rg.get_bribers()))

    def test_neighbours(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertIsInstance(self.rg._neighbours(node, briber_idx), list)

    def test_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_rating(node, briber_idx) >= 0)

    def test_median_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._median_p_rating(node, briber_idx) >= 0)

    def test_sample_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._sample_p_rating(node, briber_idx) >= 0)

    def test_o_rating(self):
        for briber_idx in self._briber_indices():
            self.assertTrue(self.rg._o_rating(briber_idx) >= 0)

    def test_p_gamma_rating(self):
        # NOTE(review): _p_gamma_rating is called without the briber index,
        # unlike the other rating helpers — presumably it defaults internally.
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)
                self.assertAlmostEqual(self.rg._p_gamma_rating(node, gamma=0),
                                       self.rg._p_rating(node))

    def test_weighted_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)

    def test_weighted_median_p_rating(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertTrue(self.rg._p_gamma_rating(node) >= 0)

    def test_is_influential(self):
        for briber_idx in self._briber_indices():
            for node in self.rg.get_customers():
                self.assertGreaterEqual(
                    self.rg.is_influential(node, 0.2, briber_idx, charge_briber=False), 0)

    def test_bribe(self):
        # Any single bribe should perturb that briber's graph evaluation.
        for briber_idx in self._briber_indices():
            initial_value = self.rg.eval_graph(briber_idx)
            for node in self.rg.get_customers():
                graph_copy = deepcopy(self.rg)
                graph_copy.bribe(node, 0.1, briber_idx)
                bribed_value = graph_copy.eval_graph(briber_idx)
                self.assertTrue(initial_value != bribed_value)

    def test_eval_graph(self):
        for briber_idx in self._briber_indices():
            self.assertGreaterEqual(self.rg.eval_graph(briber_idx), 0)

    def test_trust_update(self):
        # Set all votes to 0.
        baseline = deepcopy(self.rg)
        for u in baseline.get_customers():
            baseline._votes[u][0] = 0
        for customer in baseline.get_customers():
            bribed_graph = deepcopy(baseline)
            # Then bribe one individual.
            bribed_graph.bribe(0, 1, 0)
            # Update the trust.
            bribed_graph._update_trust()
            # Trust must not increase on any edge incident to the customer.
            for neighbour in baseline.get_customers():
                if self.rg._g.hasEdge(customer, neighbour):
                    initial_trust = baseline.get_weight(customer, neighbour)
                    updated_trust = bribed_graph.get_weight(customer, neighbour)
                    self.assertGreaterEqual(initial_trust, updated_trust)
/test/BribeNet/graph/temporal/test_ratingGraphBuilder.py
from unittest import TestCase


class TestRatingGraphBuilder(TestCase):
    """Placeholder suite for the temporal rating-graph builder (no tests yet)."""
    pass
/test/BribeNet/graph/temporal/test_thresholdGraph.py
from unittest import TestCase
from unittest.mock import MagicMock

from BribeNet.bribery.temporal.nonBriber import NonBriber
from BribeNet.graph.temporal.action.actionType import ActionType
from BribeNet.graph.temporal.thresholdGraph import ThresholdGraph


class TestThresholdGraph(TestCase):
    """End-to-end stepping tests for the threshold-based temporal graph."""

    def setUp(self) -> None:
        self.rg = ThresholdGraph((NonBriber(10), NonBriber(10)), threshold=0.4, q=0.5)

    def tearDown(self) -> None:
        del self.rg

    def _step_twice_and_get_action(self):
        # Two steps produce one completed customer round that we can inspect.
        self.rg.step()
        self.rg.step()
        action = self.rg.get_last_customer_action()
        self.assertIsNotNone(action)
        return action

    def test_customer_action_runs_successfully(self):
        action = self._step_twice_and_get_action()
        self.assertTrue(action.get_performed())

    def test_customer_action_no_votes_runs_successfully(self):
        # With every rating forced to 0 nobody should SELECT a briber.
        self.rg.get_rating = MagicMock(return_value=0)
        action = self._step_twice_and_get_action()
        for node in action.actions:
            self.assertNotEqual(action.actions[node], ActionType.SELECT)
        self.assertTrue(action.get_performed())

    def test_customer_action_disconnected_graph_runs_successfully(self):
        # With no neighbours to consult, every customer falls back to SELECT.
        self.rg._neighbours = MagicMock(return_value=[])
        self.rg._q = 0.5
        action = self._step_twice_and_get_action()
        for node in action.actions:
            self.assertEqual(action.actions[node][0], ActionType.SELECT)
        self.assertTrue(action.get_performed())
/test/BribeNet/graph/test_ratingGraph.py
from unittest import TestCase


class TestRatingGraph(TestCase):
    """Intentionally empty: rating-graph behaviour is covered by
    test/graph/static/test_singleBriberRatingGraph and
    test/graph/static/test_multiBriberRatingGraph."""
    pass
/test/BribeNet/prediction/test_parameterPrediction.py
from unittest import TestCase

from networkit.generators import WattsStrogatzGenerator
from numpy import logspace

from BribeNet.prediction.parameterPrediction import ParameterPrediction


class TestParameterPrediction(TestCase):
    """Small-world parameter prediction on a known Watts-Strogatz graph."""

    def setUp(self) -> None:
        self.generator = WattsStrogatzGenerator(50, 6, 0.1)
        self.pred = ParameterPrediction(self.generator.generate())

    def tearDown(self) -> None:
        del self.pred, self.generator

    def test_average_clustering(self):
        self.assertTrue(self.pred.average_clustering() > 0)

    def test_average_shortest_path_length(self):
        self.assertTrue(self.pred.average_shortest_path_length() > 0)

    def test_predict_small_world(self):
        # All recovered Watts-Strogatz parameters must be strictly positive.
        n, k, p = self.pred.predict_small_world()
        self.assertTrue(n > 0)
        self.assertTrue(k > 0)
        self.assertTrue(p > 0)

    def test_generate_example_graphs(self):
        rewiring_probs = logspace(-5, 0, 64, False, 10)
        l_values, c_values, l0, c0 = ParameterPrediction.generate_example_graphs(
            50, 6, rewiring_probs)
        self.assertTrue(l0 > 0)
        self.assertTrue(c0 > 0)
constable-ldp/gym_management_app
refs/heads/main
{"/console.py": ["/models/member.py", "/models/gym_class.py", "/models/room.py", "/models/instructor.py", "/models/schedule.py"], "/controllers/member_controller.py": ["/models/member.py"], "/repositories/member_repository.py": ["/models/member.py"], "/tests/member_test.py": ["/models/member.py"], "/run_test.py": ["/tests/instructor_test.py", "/tests/room_test.py", "/tests/schedule_test.py", "/tests/member_test.py", "/tests/gym_class_test.py"], "/controllers/class_controller.py": ["/models/gym_class.py"], "/repositories/gym_class_repository.py": ["/models/gym_class.py"], "/repositories/schedule_repository.py": ["/models/gym_class.py", "/models/room.py", "/models/schedule_member.py", "/models/instructor.py", "/models/schedule.py"], "/tests/gym_class_test.py": ["/models/gym_class.py"], "/controllers/room_controller.py": ["/models/room.py"], "/repositories/room_repository.py": ["/models/room.py"], "/tests/room_test.py": ["/models/room.py"], "/controllers/schedule_controller.py": ["/models/schedule_member.py", "/models/schedule.py"], "/controllers/instructor_controller.py": ["/models/instructor.py"], "/repositories/instructor_schedule_repository.py": ["/models/instructor.py"], "/repositories/instructor_timetable_repository.py": ["/models/instructor.py"], "/tests/instructor_test.py": ["/models/instructor.py"], "/repositories/instructor_details_repository.py": ["/models/instructor.py"], "/tests/schedule_test.py": ["/models/schedule.py"]}
└── ├── app.py ├── console.py ├── controllers │ ├── class_controller.py │ ├── instructor_controller.py │ ├── member_controller.py │ ├── room_controller.py │ └── schedule_controller.py ├── models │ ├── gym_class.py │ ├── instructor.py │ ├── member.py │ ├── room.py │ ├── schedule.py │ └── schedule_member.py ├── repositories │ ├── gym_class_repository.py │ ├── instructor_details_repository.py │ ├── instructor_schedule_repository.py │ ├── instructor_timetable_repository.py │ ├── member_repository.py │ ├── room_repository.py │ └── schedule_repository.py ├── run_test.py └── tests ├── gym_class_test.py ├── instructor_test.py ├── member_test.py ├── room_test.py └── schedule_test.py
/app.py
from flask import Flask, render_template

from controllers.class_controller import classes_blueprint
from controllers.member_controller import members_blueprint
from controllers.room_controller import rooms_blueprint
from controllers.instructor_controller import instructors_blueprint
from controllers.schedule_controller import schedule_blueprint

# The app and its blueprints are wired at import time (no factory pattern).
app = Flask(__name__)
for blueprint in (classes_blueprint, members_blueprint, rooms_blueprint,
                  instructors_blueprint, schedule_blueprint):
    app.register_blueprint(blueprint)


@app.route('/')
def home():
    """Render the landing page."""
    return render_template('index.html')


if __name__ == '__main__':
    app.run(debug=True)
/console.py
"""Seed script: populates the repositories with demo gym classes, members,
rooms, instructors and a week of class schedules (May 2021)."""

from models.gym_class import GymClass
from models.member import Member
from models.room import Room
from models.instructor import InstructorDetails, InstructorSchedule, InstructorTimetable
from models.schedule import Schedule
import repositories.gym_class_repository as class_repository
import repositories.member_repository as member_repository
import repositories.room_repository as room_repository
import repositories.instructor_timetable_repository as timetable_repository
import repositories.instructor_details_repository as details_repository
import repositories.instructor_schedule_repository as i_schedule_repository
import repositories.schedule_repository as schedule_repository
import datetime

# --- Gym classes -----------------------------------------------------------
class_1 = GymClass('Hot Yoga', 'Yoga in a very warm studio', 60, 16)
class_2 = GymClass('CrossFit', 'Bodyweight workout', 90, 24)
class_3 = GymClass('Spinning', 'Stationary indoor cycling', 60, 20)
class_4 = GymClass('Adult Swimming Lessons', 'Adult Swimming Lessons', 60, 20)
class_5 = GymClass('Water Aerobics', 'Water Exercises', 60, 25)
for gym_class in (class_1, class_2, class_3, class_4, class_5):
    class_repository.save(gym_class)

# --- Members ---------------------------------------------------------------
member_1 = Member('John', 'Smith', 'johnsmith@gmail.com', '07595964019',
                  datetime.date(1997, 5, 17), True, False,
                  datetime.date(2021, 3, 21), datetime.date(2022, 3, 21))
member_2 = Member('Luke', 'Jones', 'lukejones@gmail.com', '07595964018',
                  datetime.date(1992, 1, 15), False, False, None, None)
member_3 = Member('Mary', 'Taylor', 'marytaylor@gmail.com', '07595964048',
                  datetime.date(1988, 12, 1), True, True,
                  datetime.date(2021, 4, 15), datetime.date(2022, 7, 15))
member_4 = Member('Susan', 'Wilson', 'susanwilson@gmail.com', '07595964013',
                  datetime.date(1968, 12, 1), False, False, None, None)
for member in (member_1, member_2, member_3, member_4):
    member_repository.save(member)

# --- Rooms -----------------------------------------------------------------
room_1 = Room('Studio 1', 24, 'Large Room')
room_2 = Room('Studio 2', 4, 'Small Room')
room_3 = Room('Swimming Pool', 50, 'Pool')
for room in (room_1, room_2, room_3):
    room_repository.save(room)

# --- Instructors -----------------------------------------------------------
instructor_dets_1 = InstructorDetails('Mary', 'Johnson', datetime.date(1992, 3, 12))
instructor_dets_2 = InstructorDetails('Zach', 'Smith', datetime.date(1990, 8, 14))
instructor_dets_3 = InstructorDetails('John', 'Wilson', datetime.date(1990, 8, 14))
for instructor_details in (instructor_dets_1, instructor_dets_2, instructor_dets_3):
    details_repository.save(instructor_details)
# NOTE: instructor schedule/timetable seeding (InstructorSchedule /
# InstructorTimetable via i_schedule_repository / timetable_repository)
# is intentionally disabled, as in the original script.

# --- Class schedules -------------------------------------------------------
# (instructor, class, room, {day of May 2021: start hours}); every session
# lasts 60 minutes.  This data-driven loop replaces 32 hand-written
# Schedule(...) statements and 32 matching save(...) calls; the save order
# is identical to the original.
_TIMETABLE = [
    (instructor_dets_1, class_1, room_1, {3: (10, 13, 16), 5: (10, 13, 16), 7: (10, 13, 16)}),
    (instructor_dets_1, class_3, room_2, {4: (11, 14), 6: (11, 14)}),
    (instructor_dets_2, class_2, room_2, {3: (9, 12, 15), 5: (9, 12, 15), 7: (9, 12, 15)}),
    (instructor_dets_2, class_4, room_3, {4: (8, 12), 6: (8, 12)}),
    (instructor_dets_3, class_5, room_3, {3: (15, 18), 5: (15, 18), 7: (15, 18)}),
]
for instructor, gym_class, room, days in _TIMETABLE:
    for day, start_hours in days.items():
        for hour in start_hours:
            schedule_repository.save(
                Schedule(datetime.date(2021, 5, day), datetime.time(hour, 0), 60,
                         instructor, gym_class, room))
/controllers/class_controller.py
from flask import Blueprint, Flask, redirect, render_template, request

from models.gym_class import GymClass
import repositories.gym_class_repository as class_repository

classes_blueprint = Blueprint('classes', __name__)


def _gym_class_from_form(class_id=None):
    """Build a GymClass from the submitted form.

    class_id is None for a brand-new class (the repository assigns the id).
    """
    name = request.form['name']
    description = request.form['description']
    max_time = request.form['max_time']
    capacity = request.form['capacity']
    return GymClass(name, description, max_time, capacity, class_id)


@classes_blueprint.route('/classes')
def classes():
    """List all gym classes."""
    classes = class_repository.select_all()
    return render_template('classes/index.html', classes=classes)


@classes_blueprint.route('/classes/new')
def new_class():
    """Show the new-class form."""
    return render_template('classes/new.html')


@classes_blueprint.route('/classes/new', methods=['POST'])
def add_class():
    """Create a class from the posted form and return to the listing.

    Bug fix: the original passed the *builtin* ``id`` function as the
    class id; a new record has no id until the repository assigns one.
    """
    class_repository.save(_gym_class_from_form())
    return redirect('/classes')


@classes_blueprint.route('/classes/<id>')
def see_class(id):
    """Show the edit form for one class."""
    gym_class = class_repository.select(id)
    return render_template('classes/edit.html', gym_class=gym_class)


@classes_blueprint.route('/classes/<id>', methods=['POST'])
def edit_class(id):
    """Apply form edits to the class with the given id."""
    class_repository.update(_gym_class_from_form(id))
    return redirect('/classes')


@classes_blueprint.route('/classes/<id>/delete', methods=['POST'])
def delete_class(id):
    """Delete the class and return to the listing."""
    class_repository.delete(id)
    return redirect('/classes')
/controllers/instructor_controller.py
from flask import Blueprint, Flask, redirect, render_template, request

from models.instructor import InstructorTimetable, InstructorDetails, InstructorSchedule
import repositories.instructor_timetable_repository as timetable_repository
import repositories.instructor_details_repository as details_repository
import repositories.instructor_schedule_repository as schedule_repository

instructors_blueprint = Blueprint('instructors', __name__)

# Checkbox field names, in the order InstructorSchedule expects its day flags.
_DAY_FIELDS = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')


def _selected_days():
    """Return one boolean per weekday checkbox in the submitted form."""
    return [bool(request.form.get(day)) for day in _DAY_FIELDS]


@instructors_blueprint.route('/instructors')
def instructors():
    """List all instructors."""
    instructors = details_repository.select_all()
    return render_template('instructor/index.html', instructors=instructors)


@instructors_blueprint.route('/instructors/new_instructor')
def show_instructor():
    """Show the new-instructor-details form."""
    return render_template('instructor/new_dets.html')


@instructors_blueprint.route('/instructors/new_instructor', methods=['POST'])
def new_instructor():
    """Create instructor details from the posted form.

    Bug fix: the original passed the *builtin* ``id`` function as the
    record id; a new record has no id until the repository assigns one.
    """
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    date_of_birth = request.form['date_of_birth']
    instructor = InstructorDetails(first_name, last_name, date_of_birth, None)
    details_repository.save(instructor)
    return redirect('/instructors')


@instructors_blueprint.route('/instructors/new_schedule')
def show_schedule():
    """Show the new-schedule form."""
    return render_template('instructor/new_sch.html')


@instructors_blueprint.route('/instructors/new_schedule', methods=['POST'])
def new_scheudle():
    """Create an instructor work schedule from the posted form.

    NOTE(review): function-name typo kept deliberately — renaming it would
    change the Flask endpoint name and break any url_for('instructors.new_scheudle').
    Bug fix: ``None`` instead of the builtin ``id`` function for the new record's id.
    """
    nickname = request.form['nickname']
    start_time = request.form['start_time']
    end_time = request.form['end_time']
    instructor = InstructorSchedule(nickname, *_selected_days(), start_time, end_time, None)
    schedule_repository.save(instructor)
    return redirect('/instructors')


@instructors_blueprint.route('/instructors/new_timetable')
def show_timetable():
    """Show the new-timetable form with instructor and schedule choices."""
    instructors = details_repository.select_all()
    schedules = schedule_repository.select_all()
    return render_template('instructor/new_tim.html', instructors=instructors,
                           schedules=schedules)


@instructors_blueprint.route('/instructors/new_timetable', methods=['POST'])
def add_timetable():
    """Create a timetable entry linking an instructor to a schedule.

    Bug fix: ``None`` instead of the builtin ``id`` function for the new record's id.
    """
    instructor = details_repository.select(request.form['instructor_id'])
    schedule = schedule_repository.select(request.form['schedule_id'])
    start_date = request.form['start_date']
    timetable = InstructorTimetable(start_date, instructor, schedule, None)
    timetable_repository.save(timetable)
    return redirect('/instructors')


@instructors_blueprint.route('/instructors/schedule/<id>')
def e_schedule(id):
    """Show the edit form for a schedule."""
    schedule = schedule_repository.select(id)
    return render_template('instructor/edit_sch.html', schedule=schedule)


@instructors_blueprint.route('/instructors/schedule/<id>', methods=['POST'])
def edit_schedule(id):
    """Apply form edits to the schedule with the given id."""
    # NOTE(review): this form field is 'name' while the create form uses
    # 'nickname' — presumably the edit template differs; confirm.
    nickname = request.form['name']
    start_time = request.form['start_time']
    end_time = request.form['end_time']
    instructor = InstructorSchedule(nickname, *_selected_days(), start_time, end_time, id)
    schedule_repository.update(instructor)
    return redirect('/instructors')


@instructors_blueprint.route('/instructors/details/<id>')
def e_details(id):
    """Show the edit form for instructor details."""
    details = details_repository.select(id)
    return render_template('instructor/edit_dets.html', details=details)


@instructors_blueprint.route('/instructors/details/<id>', methods=['POST'])
def edit_details(id):
    """Apply form edits to the instructor details with the given id."""
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    date_of_birth = request.form['date_of_birth']
    instructor = InstructorDetails(first_name, last_name, date_of_birth, id)
    details_repository.update(instructor)
    return redirect('/instructors')


@instructors_blueprint.route('/instructors/details/<id>/delete', methods=['POST'])
def delete_instructor(id):
    """Delete the instructor details record."""
    details_repository.delete(id)
    return redirect('/instructors')
/controllers/member_controller.py
from flask import Blueprint, Flask, redirect, render_template, request
from models.member import Member
import repositories.member_repository as member_repository
import datetime

members_blueprint = Blueprint('members', __name__)


def _membership_from_form():
    """Read the membership fields shared by the add and edit forms.

    Returns a (membership, premium, member_since, member_until) tuple.
    Unticked checkboxes are absent from the posted form, so they default
    to False, and the date fields are only read when a membership exists.
    """
    if request.form.get('membership'):
        membership = True
        member_since = request.form['member_since']
        member_until = request.form['member_until']
        premium = bool(request.form.get('premium'))
    else:
        membership = False
        premium = False
        member_since = None
        member_until = None
    return membership, premium, member_since, member_until


@members_blueprint.route('/members')
def members():
    """List all members."""
    members = member_repository.select_all()
    return render_template('members/index.html', members=members)


@members_blueprint.route('/members/<id>')
def member(id):
    """Show one member together with their upcoming classes."""
    member = member_repository.select(id)
    classes = member_repository.select_classes(id)
    return render_template('members/edit.html', member=member, classes=classes)


@members_blueprint.route('/members/<id>', methods=['POST'])
def edit_member(id):
    """Apply posted changes to an existing member."""
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    date_of_birth = request.form['date_of_birth']
    email = request.form['email']
    phone = request.form['phone']
    membership, premium, member_since, member_until = _membership_from_form()
    member = Member(first_name, last_name, email, phone, date_of_birth,
                    membership, premium, member_since, member_until, id)
    member_repository.update(member)
    return redirect('/members')


@members_blueprint.route('/members/new')
def new_member():
    """Render the blank new-member form."""
    return render_template('members/new.html')


@members_blueprint.route('/members/new', methods=['POST'])
def add_member():
    """Create a new member from the posted form."""
    first_name = request.form['first_name']
    last_name = request.form['last_name']
    date_of_birth = request.form['date_of_birth']
    email = request.form['email']
    phone = request.form['phone']
    membership, premium, member_since, member_until = _membership_from_form()
    # BUG FIX: the original passed the builtin `id` function as the row id.
    # A new member has no id yet; the repository assigns one on save.
    member = Member(first_name, last_name, email, phone, date_of_birth,
                    membership, premium, member_since, member_until)
    member_repository.save(member)
    return redirect('/members')


@members_blueprint.route('/members/<id>/delete', methods=['POST'])
def delete_member(id):
    """Delete a member."""
    member_repository.delete(id)
    return redirect('/members')
/controllers/room_controller.py
from flask import Blueprint, Flask, redirect, render_template, request from models.room import Room import repositories.room_repository as room_repository rooms_blueprint = Blueprint('rooms', __name__) @rooms_blueprint.route('/rooms') def rooms(): rooms = room_repository.select_all() return render_template('rooms/index.html', rooms=rooms) @rooms_blueprint.route('/rooms/new') def new_room(): return render_template('rooms/new.html') @rooms_blueprint.route('/rooms/new', methods=['POST']) def add_room(): name = request.form['name'] capacity = request.form['capacity'] description = request.form['description'] room = Room(name, capacity, description, id) room_repository.save(room) return redirect('/rooms') @rooms_blueprint.route('/rooms/<id>') def see_room(id): room = room_repository.select(id) return render_template('rooms/edit.html', room=room) @rooms_blueprint.route('/rooms/<id>', methods=['POST']) def edit_room(id): name = request.form['name'] capacity = request.form['capacity'] description = request.form['description'] room = Room(name, capacity, description, id) room_repository.update(room) return redirect('/rooms') @rooms_blueprint.route('/rooms/<id>/delete', methods=['POST']) def delete_room(id): room_repository.delete(id) return redirect('/rooms')
/controllers/schedule_controller.py
from flask import Blueprint, Flask, redirect, render_template, request
from models.schedule import Schedule
from models.schedule_member import ScheduleMember
import repositories.schedule_repository as schedule_repository
import repositories.instructor_details_repository as details_repository
import repositories.gym_class_repository as gym_class_repository
import repositories.room_repository as room_repository
import repositories.member_repository as member_repository
from datetime import date
from datetime import timedelta
import calendar

schedule_blueprint = Blueprint('schedule', __name__)


@schedule_blueprint.route('/schedule')
def schedules():
    """Show the coming week's schedule, one entry per day."""
    rooms = room_repository.select_all()
    schedules = schedule_repository.select_dates()
    dates = [date.today() + timedelta(days=i) for i in range(7)]
    days = [calendar.day_name[d.weekday()] for d in dates]
    # The original if/else assigned schedules[i] in both branches (the else
    # wrote None exactly when schedules[i] was None), so a plain dict
    # comprehension is equivalent.
    schedules_dict = {'today_schedules_' + str(i): schedules[i]
                      for i in range(7)}
    return render_template('schedule/index.html', schedules=schedules,
                           dates=dates, days=days,
                           schedules_dict=schedules_dict, rooms=rooms)


@schedule_blueprint.route('/schedule/new')
def new_schedule():
    """Render the new-schedule form with its drop-down options."""
    instructors = details_repository.select_all()
    classes = gym_class_repository.select_all()
    rooms = room_repository.select_all()
    return render_template('schedule/new.html', instructors=instructors,
                           classes=classes, rooms=rooms)


@schedule_blueprint.route('/schedule/new', methods=['POST'])
def add_schedule():
    """Create a scheduled class from the posted form."""
    class_date = request.form['class_date']
    start_time = request.form['start_time']
    length_mins = request.form['length_mins']
    instructor = details_repository.select(request.form['instructor_id'])
    gym_class = gym_class_repository.select(request.form['class_id'])
    room = room_repository.select(request.form['room_id'])
    # BUG FIX: the original passed the builtin `id` function as the row id.
    # A new schedule has no id yet; the repository assigns one on save.
    schedule = Schedule(class_date, start_time, length_mins,
                        instructor, gym_class, room)
    schedule_repository.save(schedule)
    return redirect('/schedule')


@schedule_blueprint.route('/schedule/<id>')
def show_schedule(id):
    """Show one scheduled class with its booked members and headcount."""
    current_cap = schedule_repository.count_member(id)
    schedule = schedule_repository.select(id)
    selected_members = member_repository.selected_members(id)
    return render_template('schedule/show.html', schedule=schedule,
                           members=selected_members,
                           current_cap=current_cap[0][0])


@schedule_blueprint.route('/schedule/<id>/new')
def new_member(id):
    """Render the booking form listing members not yet booked on this class."""
    schedule = schedule_repository.select(id)
    members = member_repository.non_selected_members(id)
    return render_template('schedule/new_member.html', schedule=schedule,
                           members=members)


@schedule_blueprint.route('/schedule/<id>/new', methods=['POST'])
def add_member(id):
    """Book a member onto this scheduled class."""
    member = member_repository.select(request.form['member_id'])
    schedule = schedule_repository.select(id)
    schedule_member = ScheduleMember(member, schedule)
    schedule_repository.save_member(schedule_member)
    return redirect('/schedule')


@schedule_blueprint.route('/schedule/all')
def show_all():
    """List every schedule, split into previous and upcoming classes."""
    upcoming_classes = []
    previous_classes = []
    for schedule in schedule_repository.select_all():
        if schedule.class_date < date.today():
            previous_classes.append(schedule)
        else:
            upcoming_classes.append(schedule)
    return render_template('schedule/all.html',
                           previous_classes=previous_classes,
                           upcoming_classes=upcoming_classes)


@schedule_blueprint.route('/schedule/<id>/remove')
def remove_select_member(id):
    """Render the form for removing a booked member from this class."""
    schedule = schedule_repository.select(id)
    members = member_repository.selected_members(id)
    return render_template('schedule/remove_member.html', schedule=schedule,
                           members=members)


@schedule_blueprint.route('/schedule/<id>/remove', methods=['POST'])
def remove_member(id):
    """Remove the selected member's booking from this class."""
    member_id = request.form['member_id']
    schedule_repository.remove_member(id, member_id)
    return redirect('/schedule')
/models/gym_class.py
class GymClass: def __init__(self, class_name, description, max_time, capacity, id=None): self.class_name = class_name self.description = description self.max_time = max_time self.capacity = capacity self.id = id
/models/instructor.py
class InstructorDetails: def __init__(self, first_name, last_name, date_of_birth, id=None) : self.first_name = first_name self.last_name = last_name self.date_of_birth = date_of_birth self.id = id class InstructorSchedule: def __init__(self, nickname, monday, tuesday, wednesday, thursday, friday, saturday, sunday, start_time, end_time, id=None): self.nickname = nickname self.monday = monday self.tuesday = tuesday self.wednesday = wednesday self.thursday = thursday self.friday = friday self.saturday = saturday self.sunday = sunday self.start_time = start_time self.end_time = end_time self.id = id class InstructorTimetable: def __init__(self, week_start_date, detail, schedule, id=None): self.week_start_date = week_start_date self.detail = detail self.schedule = schedule self.id = id
/models/member.py
class Member: def __init__(self, first_name, last_name, email, phone, date_of_birth, membership=False, premium=False, member_since=None, member_until=None, id=None): self.first_name = first_name self.last_name = last_name self.email = email self.phone = phone self.date_of_birth = date_of_birth self.membership = membership self.premium = premium self.member_since = member_since self.member_until = member_until self.id = id
/models/room.py
class Room: def __init__(self, room_name, capacity, descripton=None, id=None): self.room_name = room_name self.capacity = capacity self.description = descripton self.id = id
/models/schedule.py
class Schedule: def __init__(self, class_date, start_time, length_mins, instructor, gym_class, room, id=None): self.class_date = class_date self.start_time = start_time self.length_mins = length_mins self.instructor = instructor self.gym_class = gym_class self.room = room self.id = id
/models/schedule_member.py
class ScheduleMember: def __init__(self, member, schedule, id=None): self.member = member self.schedule = schedule self.id = id
/repositories/gym_class_repository.py
from database.run_sql import run_sql from models.gym_class import GymClass def save(gym_class): sql = """INSERT INTO classes (class_name, description, max_time, capacity) VALUES ( %s, %s, %s, %s ) RETURNING id""" values = [gym_class.class_name, gym_class.description, gym_class.max_time, gym_class.capacity] results = run_sql(sql, values) gym_class.id = results[0]['id'] return gym_class def select_all(): gym_classes = [] sql = "SELECT * FROM classes ORDER BY id" results = run_sql(sql) for row in results: gym_class = GymClass(row['class_name'], row['description'], row['max_time'], row['capacity'], row['id']) gym_classes.append(gym_class) return gym_classes def select(id): gym_class = None sql = "SELECT * FROM classes WHERE id = %s" values = [id] result = run_sql(sql, values)[0] if result is not None: gym_class = GymClass(result['class_name'], result['description'], result['max_time'], result['capacity'], result['id']) return gym_class def update(gym_class): sql = """UPDATE classes SET class_name = %s, description = %s, max_time = %s, capacity = %s WHERE id = %s""" values = [gym_class.class_name, gym_class.description, gym_class.max_time, gym_class.capacity, gym_class.id] run_sql(sql, values) def delete_all(): sql = "DELETE FROM classes" run_sql(sql) def delete(id): sql = "DELETE FROM classes WHERE id = %s" values = [id] run_sql(sql, values)
/repositories/instructor_details_repository.py
from database.run_sql import run_sql from models.instructor import InstructorDetails def save(instructor): sql = """INSERT INTO instructor_details (first_name, last_name, date_of_birth) VALUES ( %s, %s, %s ) RETURNING id""" values = [instructor.first_name, instructor.last_name, instructor.date_of_birth] results = run_sql( sql, values ) instructor.id = results[0]['id'] return instructor def select(id): instructor = None sql = "SELECT * FROM instructor_details WHERE id = %s" values = [id] result = run_sql(sql, values)[0] if result is not None: instructor = InstructorDetails(result['first_name'], result['last_name'], result['date_of_birth'], result['id']) return instructor def select_all(): instructors = [] sql = "SELECT * FROM instructor_details" results = run_sql(sql) for row in results: instructor = InstructorDetails(row['first_name'], row['last_name'], row['date_of_birth'], row['id']) instructors.append(instructor) return instructors def update(instructor): sql = """UPDATE instructor_details SET first_name = %s, last_name = %s, date_of_birth = %s WHERE id = %s""" values = [instructor.first_name, instructor.last_name, instructor.date_of_birth, instructor.id] run_sql(sql, values) def delete_all(): sql = "DELETE FROM instructor_details" run_sql(sql) def delete(id): sql = "DELETE FROM instructor_details WHERE id = %s" values = [id] run_sql(sql, values)
/repositories/instructor_schedule_repository.py
from database.run_sql import run_sql
from models.instructor import InstructorSchedule


def _row_to_schedule(row):
    """Build an InstructorSchedule from a database row mapping."""
    return InstructorSchedule(row['nickname'], row['monday'], row['tuesday'],
                              row['wednesday'], row['thursday'],
                              row['friday'], row['saturday'], row['sunday'],
                              row['start_time'], row['end_time'], row['id'])


def save(instructor):
    """Insert a new schedule row and set its generated id on the model."""
    sql = """INSERT INTO instructor_schedules
             (nickname, monday, tuesday, wednesday, thursday, friday,
              saturday, sunday, start_time, end_time)
             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
             RETURNING id"""
    values = [instructor.nickname, instructor.monday, instructor.tuesday,
              instructor.wednesday, instructor.thursday, instructor.friday,
              instructor.saturday, instructor.sunday,
              instructor.start_time, instructor.end_time]
    results = run_sql(sql, values)
    instructor.id = results[0]['id']
    return instructor


def select_all():
    """Return every instructor schedule."""
    sql = "SELECT * FROM instructor_schedules"
    return [_row_to_schedule(row) for row in run_sql(sql)]


def select(id):
    """Return the schedule with the given id, or None if it does not exist.

    BUG FIX: the original indexed `[0]` before its None check, so a missing
    row raised IndexError instead of returning None.
    """
    sql = "SELECT * FROM instructor_schedules WHERE id = %s"
    results = run_sql(sql, [id])
    if not results:
        return None
    return _row_to_schedule(results[0])


def update(instructor):
    """Persist changes to an existing instructor schedule."""
    sql = """UPDATE instructor_schedules
             SET nickname = %s, monday = %s, tuesday = %s, wednesday = %s,
                 thursday = %s, friday = %s, saturday = %s, sunday = %s,
                 start_time = %s, end_time = %s
             WHERE id = %s"""
    values = [instructor.nickname, instructor.monday, instructor.tuesday,
              instructor.wednesday, instructor.thursday, instructor.friday,
              instructor.saturday, instructor.sunday,
              instructor.start_time, instructor.end_time, instructor.id]
    run_sql(sql, values)


def delete_all():
    """Delete every instructor-schedule row."""
    run_sql("DELETE FROM instructor_schedules")


def delete(id):
    """Delete one instructor-schedule row by id.

    BUG FIX: the table name was misspelled 'instructor_scheudles', so this
    statement always failed with an undefined-table error.
    """
    run_sql("DELETE FROM instructor_schedules WHERE id = %s", [id])
/repositories/instructor_timetable_repository.py
from database.run_sql import run_sql from models.instructor import InstructorDetails, InstructorSchedule, InstructorTimetable import repositories.instructor_details_repository as details_repository import repositories.instructor_schedule_repository as schedule_repository def save(timetable): sql = """INSERT INTO instructor_timetables ( week_start, i_details_id, i_schedules_id ) VALUES ( %s, %s, %s ) RETURNING id""" values = [timetable.week_start_date, timetable.detail.id, timetable.schedule.id] results = run_sql( sql, values ) timetable.id = results[0]['id'] return timetable def select_all(): timetables = [] sql = "SELECT * FROM instructor_timetables" results = run_sql(sql) for row in results: detail = details_repository.select(row['i_details_id']) schedule = schedule_repository.select(row['i_schedules_id']) timetable = InstructorTimetable(row['week_start'], detail, schedule, row['id']) timetables.append(timetable) return timetables def delete_all(): sql = "DELETE FROM instructor_timetables" run_sql(sql) def delete(id): sql = "DELETE FROM instructor_timetables WHERE id = %s" values = [id] run_sql(sql, values)
/repositories/member_repository.py
from database.run_sql import run_sql
from models.member import Member
import repositories.schedule_repository as schedule_repository
from datetime import date

# Columns a caller may sort by.  SQL identifiers cannot be bound as query
# parameters, so sort() validates against this whitelist before
# interpolating the column name.
_SORT_COLUMNS = {'id', 'first_name', 'last_name', 'email', 'phone',
                 'date_of_birth', 'membership', 'premium',
                 'member_since', 'member_until'}


def _row_to_member(row):
    """Build a Member from a database row mapping."""
    return Member(row['first_name'], row['last_name'], row['email'],
                  row['phone'], row['date_of_birth'], row['membership'],
                  row['premium'], row['member_since'], row['member_until'],
                  row['id'])


def save(member):
    """Insert a new member row and set its generated id on the model."""
    sql = """INSERT INTO members (first_name, last_name, email, phone,
             date_of_birth, membership, premium, member_since, member_until)
             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id"""
    values = [member.first_name, member.last_name, member.email,
              member.phone, member.date_of_birth, member.membership,
              member.premium, member.member_since, member.member_until]
    results = run_sql(sql, values)
    member.id = results[0]['id']
    return member


def select_all():
    """Return every member as Member objects."""
    sql = "SELECT * FROM members"
    return [_row_to_member(row) for row in run_sql(sql)]


def select(id):
    """Return the Member with the given id, or None if it does not exist.

    BUG FIX: the original indexed `[0]` before its None check, so a missing
    row raised IndexError instead of returning None.
    """
    sql = "SELECT * FROM members WHERE id = %s"
    results = run_sql(sql, [id])
    if not results:
        return None
    return _row_to_member(results[0])


def update(member):
    """Persist changes to an existing member."""
    sql = """UPDATE members
             SET first_name = %s, last_name = %s, email = %s, phone = %s,
                 date_of_birth = %s, membership = %s, premium = %s,
                 member_since = %s, member_until = %s
             WHERE id = %s"""
    values = [member.first_name, member.last_name, member.email,
              member.phone, member.date_of_birth, member.membership,
              member.premium, member.member_since, member.member_until,
              member.id]
    run_sql(sql, values)


def delete_all():
    """Delete every member row."""
    run_sql("DELETE FROM members")


def delete(id):
    """Delete one member row by id."""
    run_sql("DELETE FROM members WHERE id = %s", [id])


def selected_members(id):
    """Return the raw rows of members booked on the given schedule.

    NOTE(review): unlike select_all(), this returns raw rows, not Member
    objects — the templates appear to rely on that; preserved as-is.
    """
    sql = """SELECT members.* FROM members
             INNER JOIN schedules_members
             ON schedules_members.member_id = members.id
             WHERE schedules_members.schedule_id = %s"""
    return run_sql(sql, [id])


def non_selected_members(id):
    """Return raw rows of members NOT booked on the given schedule."""
    sql = """SELECT members.id FROM members
             FULL OUTER JOIN schedules_members
             ON schedules_members.member_id = members.id
             WHERE schedule_id = %s"""
    rows = run_sql(sql, [id])
    # BUG FIX: every other accessor in this module reads rows by column
    # name, which suggests run_sql returns mapping rows; row[0] would then
    # fail.  Read the selected column by name instead.
    member_ids = tuple(row['id'] for row in rows)
    if member_ids == ():
        return run_sql("SELECT * FROM members")
    sql2 = "SELECT * FROM members WHERE id NOT IN %s"
    return run_sql(sql2, [member_ids])


def sort(type):
    """Return all members ordered by the given column name.

    BUG FIX: the original bound the column name as a query parameter, which
    the driver renders as a quoted string literal — `ORDER BY 'first_name'`
    orders by a constant, i.e. not at all.  Identifiers cannot be
    parameterised, so validate against a whitelist and interpolate the
    verified name.
    """
    if type not in _SORT_COLUMNS:
        raise ValueError('cannot sort members by %r' % (type,))
    sql = "SELECT * FROM members ORDER BY " + type
    return [_row_to_member(row) for row in run_sql(sql)]


def select_classes(id):
    """Return the member's upcoming (today or later) scheduled classes."""
    classes = []
    sql = "SELECT * FROM schedules_members WHERE member_id = %s"
    for row in run_sql(sql, [id]):
        schedule = schedule_repository.select(row['schedule_id'])
        if schedule.class_date >= date.today():
            classes.append(schedule)
    return classes
/repositories/room_repository.py
from database.run_sql import run_sql from models.room import Room def save(room): sql = """INSERT INTO rooms (room_name, capacity, description) VALUES (%s, %s, %s) RETURNING id""" values = [room.room_name, room.capacity, room.description] results = run_sql(sql, values) room.id = results[0]['id'] return room def select_all(): rooms = [] sql = "SELECT * FROM rooms ORDER BY id" results = run_sql(sql) for row in results: room = Room(row['room_name'], row['capacity'], row['description'], row['id']) rooms.append(room) return rooms def select(id): room = None sql = "SELECT * FROM rooms WHERE id = %s" values = [id] result = run_sql(sql, values)[0] if result is not None: room = Room(result['room_name'], result['capacity'], result['description'], result['id']) return room def update(room): sql = """UPDATE rooms SET room_name = %s, capacity = %s, description = %s WHERE id = %s""" values = [room.room_name, room.capacity, room.description, room.id] run_sql(sql, values) def delete_all(): sql = "DELETE FROM rooms" run_sql(sql) def delete(id): sql = "DELETE FROM rooms WHERE id = %s" values = [id] run_sql(sql, values)
/repositories/schedule_repository.py
from database.run_sql import run_sql
from models.schedule import Schedule
from models.instructor import InstructorDetails
from models.gym_class import GymClass
from models.room import Room
from models.schedule_member import ScheduleMember
import repositories.instructor_details_repository as instructor_repository
import repositories.gym_class_repository as gym_class_repository
import repositories.room_repository as room_repository
import repositories.member_repository as member_repository
from datetime import timedelta
from datetime import date


def _row_to_schedule(row):
    """Build a Schedule (with its related objects loaded) from a row mapping."""
    instructor = instructor_repository.select(row['instructor_id'])
    gym_class = gym_class_repository.select(row['class_id'])
    room = room_repository.select(row['room_id'])
    return Schedule(row['class_date'], row['start_time'], row['length_mins'],
                    instructor, gym_class, room, row['id'])


def save(schedule):
    """Insert a new schedule row and set its generated id on the model."""
    sql = """INSERT INTO schedules (class_date, start_time, length_mins,
             instructor_id, class_id, room_id)
             VALUES (%s, %s, %s, %s, %s, %s) RETURNING id"""
    values = [schedule.class_date, schedule.start_time, schedule.length_mins,
              schedule.instructor.id, schedule.gym_class.id,
              schedule.room.id]
    results = run_sql(sql, values)
    schedule.id = results[0]['id']


def select_all():
    """Return every schedule, ordered by class date."""
    sql = "SELECT * FROM schedules ORDER BY class_date"
    return [_row_to_schedule(row) for row in run_sql(sql)]


def select_dates():
    """Return seven lists of Schedules: today plus the next six days."""
    schedules_list = []
    sql = "SELECT * FROM schedules WHERE class_date = %s ORDER BY start_time"
    for index in range(7):
        schedules = []
        values = [date.today() + timedelta(days=index)]
        results = run_sql(sql, values)
        if results is not None:
            for row in results:
                schedules.append(_row_to_schedule(row))
        else:
            # Preserved from the original: a None result set contributes a
            # single None entry for that day.
            schedules.append(None)
        schedules_list.append(schedules)
    return schedules_list


def update(schedule):
    """Persist changes to an existing schedule.

    BUG FIX: the original supplied six values for seven placeholders — the
    WHERE id = %s parameter was missing from the values list, so every
    update failed.  schedule.id is now appended.
    """
    sql = """UPDATE schedules
             SET (class_date, length_mins, start_time, instructor_id,
                  class_id, room_id) = (%s, %s, %s, %s, %s, %s)
             WHERE id = %s"""
    values = [schedule.class_date, schedule.length_mins, schedule.start_time,
              schedule.instructor.id, schedule.gym_class.id,
              schedule.room.id, schedule.id]
    run_sql(sql, values)


def select(id):
    """Return the Schedule with the given id, or None if it does not exist.

    BUG FIX: the original indexed `[0]` before its None check, so a missing
    row raised IndexError instead of returning None.
    """
    sql = "SELECT * FROM schedules WHERE id = %s"
    results = run_sql(sql, [id])
    if not results:
        return None
    return _row_to_schedule(results[0])


def delete_all():
    """Delete every schedule row."""
    run_sql("DELETE FROM schedules")


def delete(id):
    """Delete one schedule row by id."""
    run_sql("DELETE FROM schedules WHERE id = %s", [id])


def save_member(member):
    """Insert a booking row and set its generated id on the ScheduleMember."""
    sql = """INSERT INTO schedules_members (member_id, schedule_id)
             VALUES (%s, %s) RETURNING id"""
    values = [member.member.id, member.schedule.id]
    results = run_sql(sql, values)
    member.id = results[0]['id']


def count_member(id):
    """Return the raw count result of members booked on a schedule."""
    sql = """SELECT COUNT(member_id) FROM schedules_members
             WHERE schedule_id = %s"""
    return run_sql(sql, [id])


def remove_member(id, member_id):
    """Remove one member's booking from one schedule."""
    sql = """DELETE FROM schedules_members
             WHERE schedule_id = %s AND member_id = %s"""
    run_sql(sql, [id, member_id])
/run_test.py
import unittest from tests.member_test import TestMember from tests.gym_class_test import TestGymClass from tests.instructor_test import TestInstructorDetails, TestInstructorSchedule from tests.room_test import TestRoom from tests.schedule_test import TestSchedule if __name__ == '__main__': unittest.main()
/tests/gym_class_test.py
import unittest from models.gym_class import GymClass class TestGymClass(unittest.TestCase): def setUp(self): self.gym_class = GymClass('Hot Yoga', 'Yoga performed in a very warm studio', 60, 16) def test_class_has_name(self): self.assertEqual('Hot Yoga', self.gym_class.class_name) def test_class_has_description(self): self.assertEqual('Yoga performed in a very warm studio', self.gym_class.description) def test_class_has_max_time(self): self.assertEqual(60, self.gym_class.max_time) def test_class_has_capacity(self): self.assertEqual(16, self.gym_class.capacity)
/tests/instructor_test.py
import unittest from models.instructor import InstructorDetails, InstructorSchedule import datetime class TestInstructorDetails(unittest.TestCase): def setUp(self): self.instructor = InstructorDetails('Mary', 'Jones', datetime.date(1992, 3, 12)) def test_instructor_has_first_name(self): self.assertEqual('Mary', self.instructor.first_name) def test_instructor_has_last_name(self): self.assertEqual('Jones', self.instructor.last_name) def test_instructor_has_date_of_birth(self): self.assertEqual('1992-03-12', str(self.instructor.date_of_birth)) class TestInstructorSchedule(unittest.TestCase): def setUp(self): self.instructor_dets = InstructorDetails('Mary', 'Jones', datetime.date(1992, 3, 12)) self.instructor = InstructorSchedule(datetime.date(2021, 3, 22), True, True, True, True, True, False, False, datetime.time(9, 0), datetime.time(17, 0), self.instructor_dets) def test_instructor_has_week_start_date(self): self.assertEqual('2021-03-22', str(self.instructor.week_start_date)) def test_instructor_has_instructor(self): self.assertEqual('Mary', self.instructor.instructor.first_name) def test_instructor_has_day(self): self.assertEqual(True, self.instructor.monday) self.assertEqual(True, self.instructor.tuesday) self.assertEqual(True, self.instructor.wednesday) self.assertEqual(True, self.instructor.thursday) self.assertEqual(True, self.instructor.friday) self.assertEqual(False, self.instructor.saturday) self.assertEqual(False, self.instructor.sunday) def test_instructor_has_start_time(self): self.assertEqual('09:00:00', str(self.instructor.start_time)) def test_instructor_has_end_time(self): self.assertEqual('17:00:00', str(self.instructor.end_time))
/tests/member_test.py
import unittest import datetime from models.member import Member class TestMember(unittest.TestCase): def setUp(self): self.member = Member('John', 'Smith', 'johnsmith@gmail.com', '07595964019', datetime.date(1997, 5, 17), True, False, datetime.date(2021, 3, 21), datetime.date(2021, 4, 21)) def test_member_has_first_name(self): self.assertEqual('John', self.member.first_name) def test_member_has_last_name(self): self.assertEqual('Smith', self.member.last_name) def test_member_has_email(self): self.assertEqual('johnsmith@gmail.com', self.member.email) def test_member_has_phone(self): self.assertEqual('07595964019', self.member.phone) def test_member_has_date_of_birth(self): self.assertEqual('1997-05-17', str(self.member.date_of_birth)) def test_member_has_membership(self): self.assertEqual(True, self.member.membership) def test_member_has_premium(self): self.assertEqual(False, self.member.premium) def test_member_has_member_since(self): self.assertEqual('2021-03-21', str(self.member.member_since)) def test_member_has_member_until(self): self.assertEqual('2021-04-21', str(self.member.member_until))
/tests/room_test.py
import unittest from models.room import Room class TestRoom(unittest.TestCase): def setUp(self): self.room = Room('Room 1', 24, 'Large Room') def test_room_has_name(self): self.assertEqual('Room 1', self.room.room_name) def test_room_has_capacity(self): self.assertEqual(24, self.room.capacity) def test_room_has_description(self): self.assertEqual('Large Room', self.room.description)
/tests/schedule_test.py
import unittest from models.schedule import Schedule import datetime class TestSchedule(unittest.TestCase): def setUp(self): self.schedule = Schedule(datetime.date(2021, 3, 21), 45) def test_schedule_has_class_date(self): self.assertEqual('2021-03-21', str(self.schedule.class_date)) def test_schedule_has_length_mins(self): self.assertEqual(45, self.schedule.length_mins)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Redent0r/Libra
refs/heads/master
{"/master_admin.py": ["/gui_move.py", "/gui_client.py", "/gui_sale.py", "/gui_inventory.py", "/gui_purchase.py"], "/main_login.py": ["/mec_login.py", "/gui_login.py"]}
└── ├── gui_client.py ├── gui_inventory.py ├── gui_login.py ├── gui_move.py ├── gui_purchase.py ├── gui_sale.py ├── main_login.py ├── master_admin.py ├── mec_inventory.py └── mec_login.py
/gui_client.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'gui_cliente.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    """Generated 'Add Client' dialog: a form of client fields plus
    Add/Undo buttons."""

    def setupUi(self, Dialog):
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(188, 227)
        Dialog.setMinimumSize(QtCore.QSize(188, 227))
        Dialog.setMaximumSize(QtCore.QSize(350, 227))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/manager-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        Dialog.setModal(True)
        self.gridLayout = QtGui.QGridLayout(Dialog)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.formLayout = QtGui.QFormLayout()
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
        self.label_2 = QtGui.QLabel(Dialog)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_2)
        self.leditName = QtGui.QLineEdit(Dialog)
        self.leditName.setObjectName(_fromUtf8("leditName"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.leditName)
        self.leditPhone = QtGui.QLineEdit(Dialog)
        self.leditPhone.setObjectName(_fromUtf8("leditPhone"))
        self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.leditPhone)
        self.label_3 = QtGui.QLabel(Dialog)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_3)
        self.label_4 = QtGui.QLabel(Dialog)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_4)
        self.leditAddress = QtGui.QLineEdit(Dialog)
        self.leditAddress.setObjectName(_fromUtf8("leditAddress"))
        self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.leditAddress)
        self.leditEmail = QtGui.QLineEdit(Dialog)
        self.leditEmail.setObjectName(_fromUtf8("leditEmail"))
        self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.leditEmail)
        self.label_5 = QtGui.QLabel(Dialog)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_5)
        self.leditCellphone = QtGui.QLineEdit(Dialog)
        self.leditCellphone.setObjectName(_fromUtf8("leditCellphone"))
        self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.leditCellphone)
        self.label_6 = QtGui.QLabel(Dialog)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_6)
        self.leditFax = QtGui.QLineEdit(Dialog)
        self.leditFax.setObjectName(_fromUtf8("leditFax"))
        self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.leditFax)
        self.label_7 = QtGui.QLabel(Dialog)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_7)
        self.leditID = QtGui.QLineEdit(Dialog)
        self.leditID.setMinimumSize(QtCore.QSize(0, 0))
        self.leditID.setPlaceholderText(_fromUtf8(""))
        self.leditID.setObjectName(_fromUtf8("leditID"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.leditID)
        self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btnAdd = QtGui.QPushButton(Dialog)
        self.btnAdd.setObjectName(_fromUtf8("btnAdd"))
        self.horizontalLayout.addWidget(self.btnAdd)
        self.btnUndo = QtGui.QPushButton(Dialog)
        self.btnUndo.setObjectName(_fromUtf8("btnUndo"))
        self.horizontalLayout.addWidget(self.btnUndo)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        Dialog.setWindowTitle(_translate("Dialog", "Add Client", None))
        self.label.setText(_translate("Dialog", "Name: ", None))
        self.label_2.setText(_translate("Dialog", "Phone: ", None))
        self.label_3.setText(_translate("Dialog", "Address: ", None))
        self.label_4.setText(_translate("Dialog", "E-mail: ", None))
        self.label_5.setText(_translate("Dialog", "Cellphone: ", None))
        self.label_6.setText(_translate("Dialog", "Fax:", None))
        self.label_7.setText(_translate("Dialog", "ID:", None))
        self.btnAdd.setText(_translate("Dialog", "Add", None))
        self.btnUndo.setText(_translate("Dialog", "Undo", None))

import res_rc
/gui_inventory.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'gui_inventory.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

# Compatibility shims emitted by pyuic4: fall back to an identity function /
# plain translate() when QString / UnicodeUTF8 are unavailable (SIP API v2).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_MainWindow(object):
    """Auto-generated main window for the "Libra" inventory application.

    Five tabs (Balance, Inventory, Purchases, Sales, Clients), each with a
    toolbar-like button row plus a table view, and a left-docked toolbar
    with Refresh / Sale / Purchase / Client actions. Apply to a QMainWindow
    via setupUi(); do not edit by hand — regenerate from 'gui_inventory.ui'.
    """

    def setupUi(self, MainWindow):
        # --- Main window chrome ---
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(1269, 712)
        font = QtGui.QFont()
        font.setPointSize(10)
        MainWindow.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/dbIcon.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        MainWindow.setLocale(QtCore.QLocale(QtCore.QLocale.Spanish, QtCore.QLocale.Panama))
        MainWindow.setIconSize(QtCore.QSize(60, 60))
        MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        MainWindow.setDocumentMode(False)
        MainWindow.setTabShape(QtGui.QTabWidget.Rounded)
        MainWindow.setUnifiedTitleAndToolBarOnMac(False)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.tabWidget = QtGui.QTabWidget(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.tabWidget.setFont(font)
        self.tabWidget.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.tabWidget.setTabPosition(QtGui.QTabWidget.North)
        self.tabWidget.setTabShape(QtGui.QTabWidget.Rounded)
        self.tabWidget.setIconSize(QtCore.QSize(25, 25))
        self.tabWidget.setElideMode(QtCore.Qt.ElideNone)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        # --- Balance tab: period selectors + purchases/sales tables + summary grid ---
        self.tab_balance = QtGui.QWidget()
        self.tab_balance.setObjectName(_fromUtf8("tab_balance"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.tab_balance)
        self.verticalLayout_3.setMargin(0)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.verticalLayout_2.setContentsMargins(0, 0, -1, -1)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.horizontalLayout_7 = QtGui.QHBoxLayout()
        self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
        self.verticalLayout_7 = QtGui.QVBoxLayout()
        self.verticalLayout_7.setContentsMargins(0, -1, -1, -1)
        self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
        # "Period" group: Historic / Annual / Monthly / Daily radio buttons,
        # date editors for annual/monthly, and a calendar for daily selection.
        self.groupBox = QtGui.QGroupBox(self.tab_balance)
        self.groupBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.formLayout = QtGui.QFormLayout()
        self.formLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.formLayout.setFormAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.formLayout.setContentsMargins(0, -1, -1, -1)
        self.formLayout.setVerticalSpacing(6)
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        self.dateAnnual = QtGui.QDateEdit(self.groupBox)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.dateAnnual.sizePolicy().hasHeightForWidth())
        self.dateAnnual.setSizePolicy(sizePolicy)
        self.dateAnnual.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 1, 2), QtCore.QTime(0, 0, 0)))
        self.dateAnnual.setDate(QtCore.QDate(2017, 1, 2))
        self.dateAnnual.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2017, 1, 2), QtCore.QTime(0, 0, 0)))
        self.dateAnnual.setMinimumDate(QtCore.QDate(2017, 1, 2))
        self.dateAnnual.setObjectName(_fromUtf8("dateAnnual"))
        self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.dateAnnual)
        self.radioMonthly = QtGui.QRadioButton(self.groupBox)
        self.radioMonthly.setObjectName(_fromUtf8("radioMonthly"))
        self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.radioMonthly)
        self.dateMonthly = QtGui.QDateEdit(self.groupBox)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.dateMonthly.sizePolicy().hasHeightForWidth())
        self.dateMonthly.setSizePolicy(sizePolicy)
        self.dateMonthly.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 5, 1), QtCore.QTime(0, 0, 0)))
        self.dateMonthly.setDate(QtCore.QDate(2017, 5, 1))
        self.dateMonthly.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2017, 5, 1), QtCore.QTime(0, 0, 0)))
        self.dateMonthly.setMinimumDate(QtCore.QDate(2017, 5, 1))
        self.dateMonthly.setCurrentSection(QtGui.QDateTimeEdit.MonthSection)
        self.dateMonthly.setObjectName(_fromUtf8("dateMonthly"))
        self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.dateMonthly)
        self.radioAnnual = QtGui.QRadioButton(self.groupBox)
        self.radioAnnual.setObjectName(_fromUtf8("radioAnnual"))
        self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.radioAnnual)
        self.radioHistoric = QtGui.QRadioButton(self.groupBox)
        self.radioHistoric.setChecked(True)
        self.radioHistoric.setObjectName(_fromUtf8("radioHistoric"))
        self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.radioHistoric)
        self.radioDaily = QtGui.QRadioButton(self.groupBox)
        self.radioDaily.setObjectName(_fromUtf8("radioDaily"))
        self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.radioDaily)
        self.horizontalLayout.addLayout(self.formLayout)
        self.calBalance = QtGui.QCalendarWidget(self.groupBox)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.calBalance.sizePolicy().hasHeightForWidth())
        self.calBalance.setSizePolicy(sizePolicy)
        self.calBalance.setMinimumSize(QtCore.QSize(300, 0))
        self.calBalance.setMaximumSize(QtCore.QSize(16777215, 100))
        self.calBalance.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.calBalance.setSelectedDate(QtCore.QDate(2017, 3, 1))
        self.calBalance.setMinimumDate(QtCore.QDate(2017, 3, 1))
        self.calBalance.setMaximumDate(QtCore.QDate(2100, 12, 31))
        self.calBalance.setFirstDayOfWeek(QtCore.Qt.Monday)
        self.calBalance.setGridVisible(True)
        self.calBalance.setHorizontalHeaderFormat(QtGui.QCalendarWidget.NoHorizontalHeader)
        self.calBalance.setVerticalHeaderFormat(QtGui.QCalendarWidget.NoVerticalHeader)
        self.calBalance.setNavigationBarVisible(True)
        self.calBalance.setObjectName(_fromUtf8("calBalance"))
        self.horizontalLayout.addWidget(self.calBalance)
        self.verticalLayout_7.addWidget(self.groupBox)
        # "Purchases" group with its table view.
        self.groupBox_2 = QtGui.QGroupBox(self.tab_balance)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_2)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.tblPurchasesBal = QtGui.QTableView(self.groupBox_2)
        self.tblPurchasesBal.setAlternatingRowColors(True)
        self.tblPurchasesBal.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblPurchasesBal.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblPurchasesBal.setSortingEnabled(True)
        self.tblPurchasesBal.setObjectName(_fromUtf8("tblPurchasesBal"))
        self.tblPurchasesBal.horizontalHeader().setStretchLastSection(True)
        self.tblPurchasesBal.verticalHeader().setVisible(False)
        self.gridLayout_2.addWidget(self.tblPurchasesBal, 0, 0, 1, 1)
        self.verticalLayout_7.addWidget(self.groupBox_2)
        # "Sales" group with its table view.
        self.groupBox_3 = QtGui.QGroupBox(self.tab_balance)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_3)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.tblSalesBal = QtGui.QTableView(self.groupBox_3)
        self.tblSalesBal.setAlternatingRowColors(True)
        self.tblSalesBal.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblSalesBal.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblSalesBal.setSortingEnabled(True)
        self.tblSalesBal.setObjectName(_fromUtf8("tblSalesBal"))
        self.tblSalesBal.horizontalHeader().setStretchLastSection(True)
        self.tblSalesBal.verticalHeader().setVisible(False)
        self.gridLayout_3.addWidget(self.tblSalesBal, 0, 0, 1, 1)
        self.verticalLayout_7.addWidget(self.groupBox_3)
        self.horizontalLayout_7.addLayout(self.verticalLayout_7)
        # Read-only 7x3 balance summary grid (labels filled in retranslateUi).
        self.tblBalance = QtGui.QTableWidget(self.tab_balance)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Ignored)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.tblBalance.sizePolicy().hasHeightForWidth())
        self.tblBalance.setSizePolicy(sizePolicy)
        self.tblBalance.setMinimumSize(QtCore.QSize(350, 0))
        font = QtGui.QFont()
        font.setPointSize(14)
        self.tblBalance.setFont(font)
        self.tblBalance.setFrameShape(QtGui.QFrame.Box)
        self.tblBalance.setFrameShadow(QtGui.QFrame.Raised)
        self.tblBalance.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.tblBalance.setTabKeyNavigation(False)
        self.tblBalance.setProperty("showDropIndicator", False)
        self.tblBalance.setDragDropOverwriteMode(False)
        self.tblBalance.setAlternatingRowColors(False)
        self.tblBalance.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        self.tblBalance.setTextElideMode(QtCore.Qt.ElideLeft)
        self.tblBalance.setShowGrid(True)
        self.tblBalance.setGridStyle(QtCore.Qt.SolidLine)
        self.tblBalance.setWordWrap(True)
        self.tblBalance.setCornerButtonEnabled(False)
        self.tblBalance.setRowCount(7)
        self.tblBalance.setColumnCount(3)
        self.tblBalance.setObjectName(_fromUtf8("tblBalance"))
        # Pre-create the cells that will hold text; not every (row, col)
        # combination gets an item (e.g. rows 0 and 6 skip column 1).
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(0, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(0, 2, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(1, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(1, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(1, 2, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(2, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(2, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(2, 2, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(3, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(3, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(3, 2, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(4, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(4, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(4, 2, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(5, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(5, 1, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(5, 2, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(6, 0, item)
        item = QtGui.QTableWidgetItem()
        self.tblBalance.setItem(6, 2, item)
        self.tblBalance.horizontalHeader().setVisible(False)
        self.tblBalance.verticalHeader().setVisible(False)
        self.horizontalLayout_7.addWidget(self.tblBalance)
        self.horizontalLayout_5.addLayout(self.horizontalLayout_7)
        self.verticalLayout_2.addLayout(self.horizontalLayout_5)
        self.verticalLayout_3.addLayout(self.verticalLayout_2)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/calculator.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.tabWidget.addTab(self.tab_balance, icon1, _fromUtf8(""))
        # --- Inventory tab: modify/move buttons, filter combo + search box, table ---
        self.tab_inventory = QtGui.QWidget()
        self.tab_inventory.setObjectName(_fromUtf8("tab_inventory"))
        self.verticalLayout_6 = QtGui.QVBoxLayout(self.tab_inventory)
        self.verticalLayout_6.setMargin(0)
        self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
        self.horizontalLayout_6 = QtGui.QHBoxLayout()
        self.horizontalLayout_6.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
        self.btnModifyInventory = QtGui.QPushButton(self.tab_inventory)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.btnModifyInventory.sizePolicy().hasHeightForWidth())
        self.btnModifyInventory.setSizePolicy(sizePolicy)
        self.btnModifyInventory.setText(_fromUtf8(""))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/edit_write_pencil_pen_page-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnModifyInventory.setIcon(icon2)
        self.btnModifyInventory.setIconSize(QtCore.QSize(20, 20))
        self.btnModifyInventory.setObjectName(_fromUtf8("btnModifyInventory"))
        self.horizontalLayout_6.addWidget(self.btnModifyInventory)
        self.btnMove = QtGui.QPushButton(self.tab_inventory)
        self.btnMove.setText(_fromUtf8(""))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/swap-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnMove.setIcon(icon3)
        self.btnMove.setObjectName(_fromUtf8("btnMove"))
        self.horizontalLayout_6.addWidget(self.btnMove)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_6.addItem(spacerItem)
        self.cmboxInventory = QtGui.QComboBox(self.tab_inventory)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cmboxInventory.sizePolicy().hasHeightForWidth())
        self.cmboxInventory.setSizePolicy(sizePolicy)
        self.cmboxInventory.setMinimumSize(QtCore.QSize(20, 0))
        self.cmboxInventory.setSizeIncrement(QtCore.QSize(0, 0))
        self.cmboxInventory.setEditable(False)
        self.cmboxInventory.setInsertPolicy(QtGui.QComboBox.InsertAtBottom)
        self.cmboxInventory.setModelColumn(0)
        self.cmboxInventory.setObjectName(_fromUtf8("cmboxInventory"))
        self.horizontalLayout_6.addWidget(self.cmboxInventory)
        self.leditInventory = QtGui.QLineEdit(self.tab_inventory)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.leditInventory.sizePolicy().hasHeightForWidth())
        self.leditInventory.setSizePolicy(sizePolicy)
        self.leditInventory.setMinimumSize(QtCore.QSize(40, 0))
        self.leditInventory.setObjectName(_fromUtf8("leditInventory"))
        self.horizontalLayout_6.addWidget(self.leditInventory)
        self.verticalLayout_6.addLayout(self.horizontalLayout_6)
        self.tblInventory = QtGui.QTableView(self.tab_inventory)
        self.tblInventory.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.tblInventory.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblInventory.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblInventory.setSortingEnabled(True)
        self.tblInventory.setCornerButtonEnabled(False)
        self.tblInventory.setObjectName(_fromUtf8("tblInventory"))
        self.tblInventory.horizontalHeader().setStretchLastSection(True)
        self.verticalLayout_6.addWidget(self.tblInventory)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/paper-box-icon-63457.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.tabWidget.addTab(self.tab_inventory, icon4, _fromUtf8(""))
        # --- Purchases tab ---
        self.tab_purchases = QtGui.QWidget()
        self.tab_purchases.setObjectName(_fromUtf8("tab_purchases"))
        self.verticalLayout = QtGui.QVBoxLayout(self.tab_purchases)
        self.verticalLayout.setMargin(0)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.btnRemovePurchase = QtGui.QPushButton(self.tab_purchases)
        self.btnRemovePurchase.setText(_fromUtf8(""))
        # NOTE: icon5 is reused further down by btnRemoveClient.
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/Remove.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnRemovePurchase.setIcon(icon5)
        self.btnRemovePurchase.setIconSize(QtCore.QSize(20, 20))
        self.btnRemovePurchase.setObjectName(_fromUtf8("btnRemovePurchase"))
        self.horizontalLayout_2.addWidget(self.btnRemovePurchase)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.cmboxPurchases = QtGui.QComboBox(self.tab_purchases)
        self.cmboxPurchases.setObjectName(_fromUtf8("cmboxPurchases"))
        self.horizontalLayout_2.addWidget(self.cmboxPurchases)
        self.leditPurchases = QtGui.QLineEdit(self.tab_purchases)
        self.leditPurchases.setObjectName(_fromUtf8("leditPurchases"))
        self.horizontalLayout_2.addWidget(self.leditPurchases)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.tblPurchases = QtGui.QTableView(self.tab_purchases)
        self.tblPurchases.setAlternatingRowColors(True)
        self.tblPurchases.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblPurchases.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblPurchases.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
        self.tblPurchases.setSortingEnabled(True)
        self.tblPurchases.setWordWrap(True)
        self.tblPurchases.setCornerButtonEnabled(False)
        self.tblPurchases.setObjectName(_fromUtf8("tblPurchases"))
        self.tblPurchases.horizontalHeader().setStretchLastSection(True)
        self.tblPurchases.verticalHeader().setVisible(False)
        self.tblPurchases.verticalHeader().setSortIndicatorShown(False)
        self.verticalLayout.addWidget(self.tblPurchases)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/cart-arrow-down-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.tabWidget.addTab(self.tab_purchases, icon6, _fromUtf8(""))
        # --- Sales tab ---
        self.tab_sales = QtGui.QWidget()
        self.tab_sales.setObjectName(_fromUtf8("tab_sales"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.tab_sales)
        self.verticalLayout_4.setMargin(0)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.btnRemoveSale = QtGui.QPushButton(self.tab_sales)
        self.btnRemoveSale.setText(_fromUtf8(""))
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/undo-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnRemoveSale.setIcon(icon7)
        self.btnRemoveSale.setIconSize(QtCore.QSize(20, 20))
        self.btnRemoveSale.setObjectName(_fromUtf8("btnRemoveSale"))
        self.horizontalLayout_3.addWidget(self.btnRemoveSale)
        self.btnSettle = QtGui.QPushButton(self.tab_sales)
        self.btnSettle.setText(_fromUtf8(""))
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/payment-256.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnSettle.setIcon(icon8)
        self.btnSettle.setIconSize(QtCore.QSize(20, 20))
        self.btnSettle.setObjectName(_fromUtf8("btnSettle"))
        self.horizontalLayout_3.addWidget(self.btnSettle)
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.cmboxSales = QtGui.QComboBox(self.tab_sales)
        self.cmboxSales.setObjectName(_fromUtf8("cmboxSales"))
        self.horizontalLayout_3.addWidget(self.cmboxSales)
        self.leditSales = QtGui.QLineEdit(self.tab_sales)
        self.leditSales.setObjectName(_fromUtf8("leditSales"))
        self.horizontalLayout_3.addWidget(self.leditSales)
        self.verticalLayout_4.addLayout(self.horizontalLayout_3)
        self.tblSales = QtGui.QTableView(self.tab_sales)
        self.tblSales.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.tblSales.setAlternatingRowColors(True)
        self.tblSales.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblSales.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblSales.setSortingEnabled(True)
        self.tblSales.setCornerButtonEnabled(False)
        self.tblSales.setObjectName(_fromUtf8("tblSales"))
        self.tblSales.horizontalHeader().setStretchLastSection(True)
        self.tblSales.verticalHeader().setVisible(False)
        self.verticalLayout_4.addWidget(self.tblSales)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/cashier-icon-png-8.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.tabWidget.addTab(self.tab_sales, icon9, _fromUtf8(""))
        # --- Clients tab ---
        self.tab_clients = QtGui.QWidget()
        self.tab_clients.setObjectName(_fromUtf8("tab_clients"))
        self.verticalLayout_5 = QtGui.QVBoxLayout(self.tab_clients)
        self.verticalLayout_5.setMargin(0)
        self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.btnRemoveClient = QtGui.QPushButton(self.tab_clients)
        self.btnRemoveClient.setText(_fromUtf8(""))
        self.btnRemoveClient.setIcon(icon5)  # shares the "Remove" icon created above
        self.btnRemoveClient.setIconSize(QtCore.QSize(20, 20))
        self.btnRemoveClient.setObjectName(_fromUtf8("btnRemoveClient"))
        self.horizontalLayout_4.addWidget(self.btnRemoveClient)
        self.btnModifyClient = QtGui.QPushButton(self.tab_clients)
        self.btnModifyClient.setText(_fromUtf8(""))
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/edit_user_male_write_pencil_man-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnModifyClient.setIcon(icon10)
        self.btnModifyClient.setIconSize(QtCore.QSize(20, 20))
        self.btnModifyClient.setObjectName(_fromUtf8("btnModifyClient"))
        self.horizontalLayout_4.addWidget(self.btnModifyClient)
        spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem3)
        self.cmboxClients = QtGui.QComboBox(self.tab_clients)
        self.cmboxClients.setObjectName(_fromUtf8("cmboxClients"))
        self.horizontalLayout_4.addWidget(self.cmboxClients)
        self.leditClients = QtGui.QLineEdit(self.tab_clients)
        self.leditClients.setObjectName(_fromUtf8("leditClients"))
        self.horizontalLayout_4.addWidget(self.leditClients)
        self.verticalLayout_5.addLayout(self.horizontalLayout_4)
        self.tblClients = QtGui.QTableView(self.tab_clients)
        self.tblClients.setAlternatingRowColors(True)
        self.tblClients.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblClients.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblClients.setSortingEnabled(True)
        self.tblClients.setCornerButtonEnabled(False)
        self.tblClients.setObjectName(_fromUtf8("tblClients"))
        self.tblClients.horizontalHeader().setStretchLastSection(True)
        self.tblClients.verticalHeader().setVisible(False)
        self.verticalLayout_5.addWidget(self.tblClients)
        icon11 = QtGui.QIcon()
        icon11.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/15656.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.tabWidget.addTab(self.tab_clients, icon11, _fromUtf8(""))
        self.gridLayout.addWidget(self.tabWidget, 0, 1, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- Left-docked toolbar with Refresh / Sale / Purchase / Client actions ---
        self.toolBar = QtGui.QToolBar(MainWindow)
        self.toolBar.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.toolBar.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
        self.toolBar.setMovable(True)
        self.toolBar.setIconSize(QtCore.QSize(30, 30))
        self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.toolBar.setFloatable(False)
        self.toolBar.setObjectName(_fromUtf8("toolBar"))
        MainWindow.addToolBar(QtCore.Qt.LeftToolBarArea, self.toolBar)
        self.actionPurchase = QtGui.QAction(MainWindow)
        icon12 = QtGui.QIcon()
        icon12.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/plus-icon-0.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionPurchase.setIcon(icon12)
        self.actionPurchase.setObjectName(_fromUtf8("actionPurchase"))
        self.actionSale = QtGui.QAction(MainWindow)
        icon13 = QtGui.QIcon()
        icon13.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/product_basket-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionSale.setIcon(icon13)
        self.actionSale.setObjectName(_fromUtf8("actionSale"))
        self.actionClient = QtGui.QAction(MainWindow)
        icon14 = QtGui.QIcon()
        icon14.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/manager-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionClient.setIcon(icon14)
        self.actionClient.setObjectName(_fromUtf8("actionClient"))
        self.actionRefresh = QtGui.QAction(MainWindow)
        icon15 = QtGui.QIcon()
        icon15.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/Oxygen-Icons.org-Oxygen-Actions-view-refresh.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.actionRefresh.setIcon(icon15)
        self.actionRefresh.setObjectName(_fromUtf8("actionRefresh"))
        self.toolBar.addAction(self.actionRefresh)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionSale)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionPurchase)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionClient)
        self.toolBar.addSeparator()

        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        # Auto-connects on_<objectName>_<signal> slots defined on the window.
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Set all user-visible strings (kept separate for retranslation).
        MainWindow.setWindowTitle(_translate("MainWindow", "Libra v1.0.0", None))
        self.groupBox.setTitle(_translate("MainWindow", "Period", None))
        self.dateAnnual.setDisplayFormat(_translate("MainWindow", "yyyy", None))
        self.radioMonthly.setText(_translate("MainWindow", "Monthly", None))
        self.dateMonthly.setDisplayFormat(_translate("MainWindow", "MMM/yyyy", None))
        self.radioAnnual.setText(_translate("MainWindow", "Annual", None))
        self.radioHistoric.setText(_translate("MainWindow", "Historic", None))
        self.radioDaily.setText(_translate("MainWindow", "Daily", None))
        self.groupBox_2.setTitle(_translate("MainWindow", "Purchases", None))
        self.groupBox_3.setTitle(_translate("MainWindow", "Sales", None))
        # Disable sorting while populating the summary grid, then restore.
        __sortingEnabled = self.tblBalance.isSortingEnabled()
        self.tblBalance.setSortingEnabled(False)
        item = self.tblBalance.item(0, 0)
        item.setText(_translate("MainWindow", "Sales (paid)", None))
        item = self.tblBalance.item(0, 2)
        item.setText(_translate("MainWindow", "0.00", None))
        item = self.tblBalance.item(1, 0)
        item.setText(_translate("MainWindow", "Sales (credit)", None))
        item = self.tblBalance.item(1, 2)
        item.setText(_translate("MainWindow", "0.00", None))
        item = self.tblBalance.item(2, 0)
        item.setText(_translate("MainWindow", "Total revenue", None))
        item = self.tblBalance.item(2, 2)
        item.setText(_translate("MainWindow", "0.00", None))
        item = self.tblBalance.item(3, 0)
        item.setText(_translate("MainWindow", "Costs", None))
        item = self.tblBalance.item(3, 1)
        item.setText(_translate("MainWindow", "0.00", None))
        item = self.tblBalance.item(4, 0)
        item.setText(_translate("MainWindow", "Taxes", None))
        item = self.tblBalance.item(4, 1)
        item.setText(_translate("MainWindow", "0.00", None))
        item = self.tblBalance.item(5, 0)
        item.setText(_translate("MainWindow", "Profit", None))
        item = self.tblBalance.item(5, 2)
        item.setText(_translate("MainWindow", "0.00", None))
        item = self.tblBalance.item(6, 0)
        item.setText(_translate("MainWindow", "Profit (margin)", None))
        item = self.tblBalance.item(6, 2)
        item.setText(_translate("MainWindow", "0.00", None))
        self.tblBalance.setSortingEnabled(__sortingEnabled)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_balance), _translate("MainWindow", "Balance", None))
        self.btnModifyInventory.setToolTip(_translate("MainWindow", "Modify inventory", None))
        self.btnMove.setToolTip(_translate("MainWindow", "Move Item", None))
        self.leditInventory.setPlaceholderText(_translate("MainWindow", "Search...", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_inventory), _translate("MainWindow", "Inventory", None))
        self.btnRemovePurchase.setToolTip(_translate("MainWindow", "Remove purchase", None))
        self.leditPurchases.setPlaceholderText(_translate("MainWindow", "Search...", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_purchases), _translate("MainWindow", "Purchases", None))
        self.btnRemoveSale.setToolTip(_translate("MainWindow", "Reverse sale", None))
        self.btnSettle.setToolTip(_translate("MainWindow", "Settle debt", None))
        self.leditSales.setPlaceholderText(_translate("MainWindow", "Search...", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_sales), _translate("MainWindow", "Sales", None))
        self.btnRemoveClient.setToolTip(_translate("MainWindow", "Remove client", None))
        self.btnModifyClient.setToolTip(_translate("MainWindow", "Modify Client", None))
        self.leditClients.setPlaceholderText(_translate("MainWindow", "Search...", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_clients), _translate("MainWindow", "Clients", None))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
        self.actionPurchase.setText(_translate("MainWindow", "Purchase", None))
        self.actionSale.setText(_translate("MainWindow", "Sale", None))
        self.actionClient.setText(_translate("MainWindow", "Client", None))
        self.actionRefresh.setText(_translate("MainWindow", "Refresh", None))

# Compiled Qt resource module providing the ":/icons/..." pixmap paths.
import res_rc
/gui_login.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'gui_login.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

# Compatibility shims emitted by pyuic4: fall back to an identity function /
# plain translate() when QString / UnicodeUTF8 are unavailable (SIP API v2).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    """Auto-generated fixed-size login dialog.

    "Member Login" header above username/password line edits (password
    masked) and a centered Login button. Apply to a QDialog via setupUi();
    do not edit by hand — regenerate from 'gui_login.ui' instead.
    """

    def setupUi(self, Dialog):
        # Build the widget tree and layouts on the given QDialog.
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(172, 150)
        # Equal min/max sizes make the dialog non-resizable.
        Dialog.setMinimumSize(QtCore.QSize(172, 150))
        Dialog.setMaximumSize(QtCore.QSize(172, 150))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Arial"))
        font.setPointSize(10)
        font.setStyleStrategy(QtGui.QFont.NoAntialias)
        Dialog.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/access-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        self.verticalLayout = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(Dialog)
        font = QtGui.QFont()
        font.setPointSize(17)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        self.leditUser = QtGui.QLineEdit(Dialog)
        self.leditUser.setAlignment(QtCore.Qt.AlignCenter)
        self.leditUser.setObjectName(_fromUtf8("leditUser"))
        self.verticalLayout.addWidget(self.leditUser)
        self.leditPassword = QtGui.QLineEdit(Dialog)
        # Mask password input.
        self.leditPassword.setEchoMode(QtGui.QLineEdit.Password)
        self.leditPassword.setAlignment(QtCore.Qt.AlignCenter)
        self.leditPassword.setObjectName(_fromUtf8("leditPassword"))
        self.verticalLayout.addWidget(self.leditPassword)
        # Login button centered between two expanding spacers.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btnLogin = QtGui.QPushButton(Dialog)
        self.btnLogin.setObjectName(_fromUtf8("btnLogin"))
        self.horizontalLayout.addWidget(self.btnLogin)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Dialog)
        # Auto-connects on_<objectName>_<signal> slots defined on the dialog.
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Set all user-visible strings (kept separate for retranslation).
        Dialog.setWindowTitle(_translate("Dialog", "Inventory", None))
        self.label.setText(_translate("Dialog", "Member Login", None))
        self.leditUser.setPlaceholderText(_translate("Dialog", "Username", None))
        self.leditPassword.setPlaceholderText(_translate("Dialog", "Password", None))
        self.btnLogin.setText(_translate("Dialog", "Login", None))

# Compiled Qt resource module providing the ":/icons/..." pixmap paths.
import res_rc
/gui_move.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'gui_mover.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

# PyQt4 API-1 (Python 2) exposes QString; fall back to plain str on API-2 builds.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Older PyQt4 releases need the UnicodeUTF8 encoding argument for translate().
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    """Auto-generated UI for the "Move between groups" dialog.

    Widgets exposed to the owning dialog: ``spnboxQuantity``, ``leditCode``
    (read-only), ``leditFromGroup`` (read-only), ``cmboxToGroup`` (editable)
    and ``btnConfirm``.
    """

    def setupUi(self, Dialog):
        """Build the widget tree and layouts on *Dialog*."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(313, 99)
        # Fixed height; width may vary between 227 and 500 px.
        Dialog.setMinimumSize(QtCore.QSize(227, 99))
        Dialog.setMaximumSize(QtCore.QSize(500, 99))
        # Window icon comes from the compiled Qt resource file (res_rc below).
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/swap-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        self.verticalLayout = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Row 1: quantity to move (1..99999) plus the read-only item code.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setContentsMargins(-1, -1, 0, -1)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout_2.addWidget(self.label)
        self.spnboxQuantity = QtGui.QSpinBox(Dialog)
        self.spnboxQuantity.setMinimum(1)
        self.spnboxQuantity.setMaximum(99999)
        self.spnboxQuantity.setObjectName(_fromUtf8("spnboxQuantity"))
        self.horizontalLayout_2.addWidget(self.spnboxQuantity)
        self.leditCode = QtGui.QLineEdit(Dialog)
        self.leditCode.setReadOnly(True)
        self.leditCode.setObjectName(_fromUtf8("leditCode"))
        self.horizontalLayout_2.addWidget(self.leditCode)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Row 2: source group (read-only) and editable destination-group combo.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label_3 = QtGui.QLabel(Dialog)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout.addWidget(self.label_3)
        self.leditFromGroup = QtGui.QLineEdit(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.leditFromGroup.sizePolicy().hasHeightForWidth())
        self.leditFromGroup.setSizePolicy(sizePolicy)
        self.leditFromGroup.setReadOnly(True)
        self.leditFromGroup.setObjectName(_fromUtf8("leditFromGroup"))
        self.horizontalLayout.addWidget(self.leditFromGroup)
        self.label_4 = QtGui.QLabel(Dialog)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.horizontalLayout.addWidget(self.label_4)
        self.cmboxToGroup = QtGui.QComboBox(Dialog)
        self.cmboxToGroup.setEditable(True)
        self.cmboxToGroup.setObjectName(_fromUtf8("cmboxToGroup"))
        self.horizontalLayout.addWidget(self.cmboxToGroup)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Row 3: Confirm button centered between two expanding spacers.
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.btnConfirm = QtGui.QPushButton(Dialog)
        self.btnConfirm.setObjectName(_fromUtf8("btnConfirm"))
        self.horizontalLayout_3.addWidget(self.btnConfirm)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout_3)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply all user-visible strings (kept separate for retranslation)."""
        Dialog.setWindowTitle(_translate("Dialog", "Move", None))
        self.label.setText(_translate("Dialog", "Move:", None))
        self.label_3.setText(_translate("Dialog", "From group:", None))
        self.label_4.setText(_translate("Dialog", "To group:", None))
        self.btnConfirm.setText(_translate("Dialog", "Confirm", None))

import res_rc
/gui_purchase.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'gui_compra.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

# PyQt4 API-1 (Python 2) exposes QString; fall back to plain str on API-2 builds.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Older PyQt4 releases need the UnicodeUTF8 encoding argument for translate().
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    """Auto-generated UI for the modal "Purchase" (item entry) dialog.

    A single form layout with rows for code, group, name, cost, quantity,
    margin, price, category, vendor and min/max stock, plus Add/Undo buttons.
    Note the form-layout row indices are sparse (0,1,2,4,5,6,7,9,...,12) —
    they come straight from the .ui file.
    """

    def setupUi(self, Dialog):
        """Build the widget tree and layouts on *Dialog*."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.setWindowModality(QtCore.Qt.NonModal)
        Dialog.resize(220, 366)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
        Dialog.setSizePolicy(sizePolicy)
        # Fixed height (366); width may vary between 220 and 400 px.
        Dialog.setMinimumSize(QtCore.QSize(220, 366))
        Dialog.setMaximumSize(QtCore.QSize(400, 366))
        font = QtGui.QFont()
        font.setPointSize(10)
        Dialog.setFont(font)
        Dialog.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
        # Window icon comes from the compiled Qt resource file (res_rc below).
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/plus-icon-0.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        Dialog.setSizeGripEnabled(False)
        Dialog.setModal(True)
        self.verticalLayout = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.formLayout = QtGui.QFormLayout()
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        # --- form labels (texts assigned in retranslateUi) ---
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
        self.label_2 = QtGui.QLabel(Dialog)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_2)
        self.leditName = QtGui.QLineEdit(Dialog)
        self.leditName.setPlaceholderText(_fromUtf8(""))
        self.leditName.setObjectName(_fromUtf8("leditName"))
        self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.leditName)
        self.label_5 = QtGui.QLabel(Dialog)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_5)
        self.label_3 = QtGui.QLabel(Dialog)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.formLayout.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_3)
        # Quantity purchased: 0..999999, starts at 1.
        self.spnBoxQuantity = QtGui.QSpinBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.spnBoxQuantity.sizePolicy().hasHeightForWidth())
        self.spnBoxQuantity.setSizePolicy(sizePolicy)
        self.spnBoxQuantity.setMinimumSize(QtCore.QSize(0, 0))
        self.spnBoxQuantity.setWrapping(False)
        self.spnBoxQuantity.setFrame(True)
        self.spnBoxQuantity.setButtonSymbols(QtGui.QAbstractSpinBox.UpDownArrows)
        self.spnBoxQuantity.setAccelerated(False)
        self.spnBoxQuantity.setMaximum(999999)
        self.spnBoxQuantity.setProperty("value", 1)
        self.spnBoxQuantity.setObjectName(_fromUtf8("spnBoxQuantity"))
        self.formLayout.setWidget(5, QtGui.QFormLayout.FieldRole, self.spnBoxQuantity)
        self.label_4 = QtGui.QLabel(Dialog)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.formLayout.setWidget(9, QtGui.QFormLayout.LabelRole, self.label_4)
        self.label_6 = QtGui.QLabel(Dialog)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.formLayout.setWidget(10, QtGui.QFormLayout.LabelRole, self.label_6)
        self.leditVendor = QtGui.QLineEdit(Dialog)
        self.leditVendor.setPlaceholderText(_fromUtf8(""))
        self.leditVendor.setObjectName(_fromUtf8("leditVendor"))
        self.formLayout.setWidget(10, QtGui.QFormLayout.FieldRole, self.leditVendor)
        self.label_7 = QtGui.QLabel(Dialog)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.formLayout.setWidget(11, QtGui.QFormLayout.LabelRole, self.label_7)
        # Minimum stock level: starts at 1.
        self.spnBoxMin = QtGui.QSpinBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.spnBoxMin.sizePolicy().hasHeightForWidth())
        self.spnBoxMin.setSizePolicy(sizePolicy)
        self.spnBoxMin.setAccelerated(False)
        self.spnBoxMin.setMaximum(999999)
        self.spnBoxMin.setProperty("value", 1)
        self.spnBoxMin.setObjectName(_fromUtf8("spnBoxMin"))
        self.formLayout.setWidget(11, QtGui.QFormLayout.FieldRole, self.spnBoxMin)
        # Maximum stock level: starts at 100.
        self.spnBoxMax = QtGui.QSpinBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.spnBoxMax.sizePolicy().hasHeightForWidth())
        self.spnBoxMax.setSizePolicy(sizePolicy)
        self.spnBoxMax.setAccelerated(True)
        self.spnBoxMax.setMaximum(999999)
        self.spnBoxMax.setProperty("value", 100)
        self.spnBoxMax.setObjectName(_fromUtf8("spnBoxMax"))
        self.formLayout.setWidget(12, QtGui.QFormLayout.FieldRole, self.spnBoxMax)
        self.label_9 = QtGui.QLabel(Dialog)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_9)
        self.label_10 = QtGui.QLabel(Dialog)
        self.label_10.setObjectName(_fromUtf8("label_10"))
        self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_10)
        self.label_8 = QtGui.QLabel(Dialog)
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.formLayout.setWidget(12, QtGui.QFormLayout.LabelRole, self.label_8)
        # Item code combo is editable so a new code can be typed directly.
        self.cmBoxCode = QtGui.QComboBox(Dialog)
        self.cmBoxCode.setEditable(True)
        self.cmBoxCode.setObjectName(_fromUtf8("cmBoxCode"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.cmBoxCode)
        # Unit cost: keyboard entry only (no spin buttons), 2 decimals.
        self.spnboxCost = QtGui.QDoubleSpinBox(Dialog)
        self.spnboxCost.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnboxCost.setKeyboardTracking(False)
        self.spnboxCost.setSuffix(_fromUtf8(""))
        self.spnboxCost.setDecimals(2)
        self.spnboxCost.setMaximum(9999.0)
        self.spnboxCost.setObjectName(_fromUtf8("spnboxCost"))
        self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.spnboxCost)
        self.spnboxMargin = QtGui.QDoubleSpinBox(Dialog)
        self.spnboxMargin.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnboxMargin.setKeyboardTracking(False)
        self.spnboxMargin.setMaximum(9999.0)
        self.spnboxMargin.setObjectName(_fromUtf8("spnboxMargin"))
        self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.spnboxMargin)
        self.spnboxPrice = QtGui.QDoubleSpinBox(Dialog)
        self.spnboxPrice.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnboxPrice.setKeyboardTracking(False)
        self.spnboxPrice.setMaximum(99999.0)
        self.spnboxPrice.setObjectName(_fromUtf8("spnboxPrice"))
        self.formLayout.setWidget(7, QtGui.QFormLayout.FieldRole, self.spnboxPrice)
        self.cmboxCategory = QtGui.QComboBox(Dialog)
        self.cmboxCategory.setEditable(True)
        self.cmboxCategory.setObjectName(_fromUtf8("cmboxCategory"))
        self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.cmboxCategory)
        self.label_11 = QtGui.QLabel(Dialog)
        self.label_11.setObjectName(_fromUtf8("label_11"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_11)
        self.cmboxGroup = QtGui.QComboBox(Dialog)
        self.cmboxGroup.setEditable(True)
        self.cmboxGroup.setObjectName(_fromUtf8("cmboxGroup"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.cmboxGroup)
        self.verticalLayout.addLayout(self.formLayout)
        # Bottom row: Add/Undo buttons centered between expanding spacers.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.btnAdd = QtGui.QPushButton(Dialog)
        # autoDefault off so pressing Enter in a field doesn't trigger the button.
        self.btnAdd.setAutoDefault(False)
        self.btnAdd.setDefault(False)
        self.btnAdd.setObjectName(_fromUtf8("btnAdd"))
        self.horizontalLayout.addWidget(self.btnAdd)
        self.btnUndo = QtGui.QPushButton(Dialog)
        self.btnUndo.setAutoDefault(False)
        self.btnUndo.setObjectName(_fromUtf8("btnUndo"))
        self.horizontalLayout.addWidget(self.btnUndo)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply all user-visible strings (kept separate for retranslation)."""
        Dialog.setWindowTitle(_translate("Dialog", "Purchase", None))
        Dialog.setWhatsThis(_translate("Dialog", "Write a code and press ENTER.\n"
"the fields will fill out automatically if this code was recorded previously", None))
        self.label.setText(_translate("Dialog", "Code:", None))
        self.label_2.setText(_translate("Dialog", "Name:", None))
        self.label_5.setText(_translate("Dialog", "Unit Cost:", None))
        self.label_3.setText(_translate("Dialog", "Quantity:", None))
        self.label_4.setText(_translate("Dialog", "Category:", None))
        self.label_6.setText(_translate("Dialog", "Vendor:", None))
        self.label_7.setText(_translate("Dialog", "Minimum Quantity:", None))
        self.label_9.setText(_translate("Dialog", "Suggested Price:", None))
        self.label_10.setText(_translate("Dialog", "Profit Margin:", None))
        self.label_8.setText(_translate("Dialog", "Maximum Quantity:", None))
        self.spnboxCost.setPrefix(_translate("Dialog", "$ ", None))
        self.spnboxMargin.setPrefix(_translate("Dialog", "% ", None))
        self.spnboxPrice.setPrefix(_translate("Dialog", "$ ", None))
        self.label_11.setText(_translate("Dialog", "Group:", None))
        self.btnAdd.setText(_translate("Dialog", "Add", None))
        self.btnUndo.setText(_translate("Dialog", "Undo", None))

import res_rc
/gui_sale.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'gui_venta.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui

# PyQt4 API-1 (Python 2) exposes QString; fall back to plain str on API-2 builds.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

# Older PyQt4 releases need the UnicodeUTF8 encoding argument for translate().
try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_Dialog(object):
    """Auto-generated UI for the modal "Sale" dialog.

    Three columns: an "Inventory" group (search box + table view), a middle
    entry form (client, code, name, group, cost, price, quantity, margin,
    discount, tax/credit checkboxes, running totals, Confirm/Delete), and an
    "Items" group listing the lines added to the current sale.
    """

    def setupUi(self, Dialog):
        """Build the widget tree and layouts on *Dialog*."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.setWindowModality(QtCore.Qt.WindowModal)
        Dialog.resize(1311, 488)
        # Fixed height (488); horizontally resizable from 750 px up.
        Dialog.setMinimumSize(QtCore.QSize(750, 488))
        Dialog.setMaximumSize(QtCore.QSize(16777215, 488))
        # Window icon comes from the compiled Qt resource file (res_rc below).
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/resources/product_basket-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        Dialog.setModal(True)
        self.horizontalLayout = QtGui.QHBoxLayout(Dialog)
        self.horizontalLayout.setContentsMargins(9, -1, -1, -1)
        self.horizontalLayout.setSpacing(8)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        # --- left column: Inventory group (filter combo, search box, table) ---
        self.groupBox_2 = QtGui.QGroupBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
        self.groupBox_2.setSizePolicy(sizePolicy)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setContentsMargins(-1, 0, -1, -1)
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.cmboxInventory = QtGui.QComboBox(self.groupBox_2)
        self.cmboxInventory.setObjectName(_fromUtf8("cmboxInventory"))
        self.horizontalLayout_4.addWidget(self.cmboxInventory)
        self.leditInventory = QtGui.QLineEdit(self.groupBox_2)
        self.leditInventory.setObjectName(_fromUtf8("leditInventory"))
        self.horizontalLayout_4.addWidget(self.leditInventory)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)
        # Inventory table: single full-row selection, sortable.
        self.tblInventory = QtGui.QTableView(self.groupBox_2)
        self.tblInventory.setMinimumSize(QtCore.QSize(280, 0))
        self.tblInventory.setAlternatingRowColors(True)
        self.tblInventory.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblInventory.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblInventory.setSortingEnabled(True)
        self.tblInventory.setObjectName(_fromUtf8("tblInventory"))
        self.tblInventory.horizontalHeader().setStretchLastSection(True)
        self.tblInventory.verticalHeader().setVisible(False)
        self.verticalLayout_2.addWidget(self.tblInventory)
        self.horizontalLayout.addWidget(self.groupBox_2)
        # --- middle column: sale-entry form and totals ---
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.formLayout_2 = QtGui.QFormLayout()
        self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout_2.setObjectName(_fromUtf8("formLayout_2"))
        self.label_11 = QtGui.QLabel(Dialog)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_11.setFont(font)
        self.label_11.setObjectName(_fromUtf8("label_11"))
        self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_11)
        self.label = QtGui.QLabel(Dialog)
        self.label.setObjectName(_fromUtf8("label"))
        self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label)
        # Item code: read-only, filled from the inventory selection.
        self.leditCode = QtGui.QLineEdit(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.leditCode.sizePolicy().hasHeightForWidth())
        self.leditCode.setSizePolicy(sizePolicy)
        self.leditCode.setReadOnly(True)
        self.leditCode.setPlaceholderText(_fromUtf8(""))
        self.leditCode.setObjectName(_fromUtf8("leditCode"))
        self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.leditCode)
        self.label_10 = QtGui.QLabel(Dialog)
        self.label_10.setObjectName(_fromUtf8("label_10"))
        self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_10)
        self.leditName = QtGui.QLineEdit(Dialog)
        self.leditName.setReadOnly(True)
        self.leditName.setObjectName(_fromUtf8("leditName"))
        self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.leditName)
        self.label_6 = QtGui.QLabel(Dialog)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.formLayout_2.setWidget(5, QtGui.QFormLayout.LabelRole, self.label_6)
        self.label_2 = QtGui.QLabel(Dialog)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.formLayout_2.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_2)
        # Quantity sold: 1..999999.
        self.spnBoxQuantity = QtGui.QSpinBox(Dialog)
        self.spnBoxQuantity.setAccelerated(True)
        self.spnBoxQuantity.setKeyboardTracking(False)
        self.spnBoxQuantity.setMinimum(1)
        self.spnBoxQuantity.setMaximum(999999)
        self.spnBoxQuantity.setProperty("value", 1)
        self.spnBoxQuantity.setObjectName(_fromUtf8("spnBoxQuantity"))
        self.formLayout_2.setWidget(6, QtGui.QFormLayout.FieldRole, self.spnBoxQuantity)
        self.label_3 = QtGui.QLabel(Dialog)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.formLayout_2.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_3)
        self.label_8 = QtGui.QLabel(Dialog)
        self.label_8.setObjectName(_fromUtf8("label_8"))
        self.formLayout_2.setWidget(8, QtGui.QFormLayout.LabelRole, self.label_8)
        # Tax checkbox defaults to checked (tax included).
        self.chkBoxItbms = QtGui.QCheckBox(Dialog)
        self.chkBoxItbms.setChecked(True)
        self.chkBoxItbms.setTristate(False)
        self.chkBoxItbms.setObjectName(_fromUtf8("chkBoxItbms"))
        self.formLayout_2.setWidget(9, QtGui.QFormLayout.LabelRole, self.chkBoxItbms)
        self.label_9 = QtGui.QLabel(Dialog)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_9.setFont(font)
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.formLayout_2.setWidget(10, QtGui.QFormLayout.LabelRole, self.label_9)
        # Computed per-line total: display-only.
        self.spnBoxTotalItemPrice = QtGui.QDoubleSpinBox(Dialog)
        self.spnBoxTotalItemPrice.setReadOnly(True)
        self.spnBoxTotalItemPrice.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnBoxTotalItemPrice.setSuffix(_fromUtf8(""))
        self.spnBoxTotalItemPrice.setMaximum(999999.0)
        self.spnBoxTotalItemPrice.setObjectName(_fromUtf8("spnBoxTotalItemPrice"))
        self.formLayout_2.setWidget(10, QtGui.QFormLayout.FieldRole, self.spnBoxTotalItemPrice)
        self.label_13 = QtGui.QLabel(Dialog)
        self.label_13.setObjectName(_fromUtf8("label_13"))
        self.formLayout_2.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_13)
        # Client combo is editable so a new client name can be typed in.
        self.cmboxClient = QtGui.QComboBox(Dialog)
        self.cmboxClient.setEditable(True)
        self.cmboxClient.setObjectName(_fromUtf8("cmboxClient"))
        self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.cmboxClient)
        self.chkBoxCredit = QtGui.QCheckBox(Dialog)
        self.chkBoxCredit.setObjectName(_fromUtf8("chkBoxCredit"))
        self.formLayout_2.setWidget(9, QtGui.QFormLayout.FieldRole, self.chkBoxCredit)
        self.spnboxCost = QtGui.QDoubleSpinBox(Dialog)
        self.spnboxCost.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.spnboxCost.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnboxCost.setDecimals(2)
        self.spnboxCost.setMaximum(99999.0)
        self.spnboxCost.setObjectName(_fromUtf8("spnboxCost"))
        self.formLayout_2.setWidget(4, QtGui.QFormLayout.FieldRole, self.spnboxCost)
        self.spnboxPrice = QtGui.QDoubleSpinBox(Dialog)
        self.spnboxPrice.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnboxPrice.setMaximum(9999999.0)
        self.spnboxPrice.setObjectName(_fromUtf8("spnboxPrice"))
        self.formLayout_2.setWidget(5, QtGui.QFormLayout.FieldRole, self.spnboxPrice)
        self.spnBoxMargin = QtGui.QDoubleSpinBox(Dialog)
        self.spnBoxMargin.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnBoxMargin.setMaximum(999999.0)
        self.spnBoxMargin.setObjectName(_fromUtf8("spnBoxMargin"))
        self.formLayout_2.setWidget(7, QtGui.QFormLayout.FieldRole, self.spnBoxMargin)
        self.spnboxDiscount = QtGui.QDoubleSpinBox(Dialog)
        self.spnboxDiscount.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnboxDiscount.setMaximum(99999.0)
        self.spnboxDiscount.setObjectName(_fromUtf8("spnboxDiscount"))
        self.formLayout_2.setWidget(8, QtGui.QFormLayout.FieldRole, self.spnboxDiscount)
        self.label_14 = QtGui.QLabel(Dialog)
        self.label_14.setObjectName(_fromUtf8("label_14"))
        self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_14)
        self.leditGroup = QtGui.QLineEdit(Dialog)
        self.leditGroup.setReadOnly(True)
        self.leditGroup.setObjectName(_fromUtf8("leditGroup"))
        self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.leditGroup)
        self.verticalLayout.addLayout(self.formLayout_2)
        # Insert/Undo button row.
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.btnInsert = QtGui.QPushButton(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.btnInsert.sizePolicy().hasHeightForWidth())
        self.btnInsert.setSizePolicy(sizePolicy)
        # autoDefault off so pressing Enter in a field doesn't trigger the button.
        self.btnInsert.setAutoDefault(False)
        self.btnInsert.setDefault(False)
        self.btnInsert.setObjectName(_fromUtf8("btnInsert"))
        self.horizontalLayout_3.addWidget(self.btnInsert)
        self.btnUndo = QtGui.QPushButton(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.btnUndo.sizePolicy().hasHeightForWidth())
        self.btnUndo.setSizePolicy(sizePolicy)
        self.btnUndo.setAutoDefault(False)
        self.btnUndo.setObjectName(_fromUtf8("btnUndo"))
        self.horizontalLayout_3.addWidget(self.btnUndo)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        # Totals form: subtotal, tax, discount (all display-only).
        self.formLayout = QtGui.QFormLayout()
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        self.label_4 = QtGui.QLabel(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
        self.label_4.setSizePolicy(sizePolicy)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_4)
        self.spnBoxSubtotal = QtGui.QDoubleSpinBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.spnBoxSubtotal.sizePolicy().hasHeightForWidth())
        self.spnBoxSubtotal.setSizePolicy(sizePolicy)
        self.spnBoxSubtotal.setStyleSheet(_fromUtf8(""))
        self.spnBoxSubtotal.setWrapping(False)
        self.spnBoxSubtotal.setReadOnly(True)
        self.spnBoxSubtotal.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnBoxSubtotal.setMaximum(99999.99)
        self.spnBoxSubtotal.setObjectName(_fromUtf8("spnBoxSubtotal"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.spnBoxSubtotal)
        self.label_7 = QtGui.QLabel(Dialog)
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_7)
        self.spnBoxTaxT = QtGui.QDoubleSpinBox(Dialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.spnBoxTaxT.sizePolicy().hasHeightForWidth())
        self.spnBoxTaxT.setSizePolicy(sizePolicy)
        self.spnBoxTaxT.setReadOnly(True)
        self.spnBoxTaxT.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnBoxTaxT.setMaximum(999999.0)
        self.spnBoxTaxT.setObjectName(_fromUtf8("spnBoxTaxT"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.spnBoxTaxT)
        self.label_12 = QtGui.QLabel(Dialog)
        self.label_12.setObjectName(_fromUtf8("label_12"))
        self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_12)
        self.spnBoxDiscountT = QtGui.QDoubleSpinBox(Dialog)
        self.spnBoxDiscountT.setReadOnly(True)
        self.spnBoxDiscountT.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnBoxDiscountT.setMaximum(99999.0)
        self.spnBoxDiscountT.setObjectName(_fromUtf8("spnBoxDiscountT"))
        self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.spnBoxDiscountT)
        self.verticalLayout.addLayout(self.formLayout)
        # Separator line above the grand total.
        self.line = QtGui.QFrame(Dialog)
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.verticalLayout.addWidget(self.line)
        self.formLayout_3 = QtGui.QFormLayout()
        self.formLayout_3.setObjectName(_fromUtf8("formLayout_3"))
        self.label_5 = QtGui.QLabel(Dialog)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setFrameShape(QtGui.QFrame.NoFrame)
        self.label_5.setFrameShadow(QtGui.QFrame.Plain)
        self.label_5.setScaledContents(False)
        self.label_5.setWordWrap(False)
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_5)
        # Grand total: bold, display-only.
        self.spnBoxGrandTotal = QtGui.QDoubleSpinBox(Dialog)
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setBold(True)
        font.setWeight(75)
        font.setStrikeOut(False)
        self.spnBoxGrandTotal.setFont(font)
        self.spnBoxGrandTotal.setAutoFillBackground(False)
        self.spnBoxGrandTotal.setStyleSheet(_fromUtf8(""))
        self.spnBoxGrandTotal.setFrame(True)
        self.spnBoxGrandTotal.setReadOnly(True)
        self.spnBoxGrandTotal.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        self.spnBoxGrandTotal.setMaximum(999999.0)
        self.spnBoxGrandTotal.setObjectName(_fromUtf8("spnBoxGrandTotal"))
        self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.spnBoxGrandTotal)
        self.verticalLayout.addLayout(self.formLayout_3)
        # Confirm/Delete button row.
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.btnConfirm = QtGui.QPushButton(Dialog)
        self.btnConfirm.setAutoDefault(False)
        self.btnConfirm.setObjectName(_fromUtf8("btnConfirm"))
        self.horizontalLayout_2.addWidget(self.btnConfirm)
        self.btnDelete = QtGui.QPushButton(Dialog)
        self.btnDelete.setAutoDefault(False)
        self.btnDelete.setObjectName(_fromUtf8("btnDelete"))
        self.horizontalLayout_2.addWidget(self.btnDelete)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        self.horizontalLayout.addLayout(self.verticalLayout)
        # --- right column: items added to the current sale ---
        self.groupBox = QtGui.QGroupBox(Dialog)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.tblItems = QtGui.QTableView(self.groupBox)
        self.tblItems.setAlternatingRowColors(True)
        self.tblItems.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.tblItems.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.tblItems.setSortingEnabled(False)
        self.tblItems.setCornerButtonEnabled(False)
        self.tblItems.setObjectName(_fromUtf8("tblItems"))
        self.tblItems.horizontalHeader().setStretchLastSection(True)
        self.gridLayout.addWidget(self.tblItems, 0, 1, 1, 1)
        self.horizontalLayout.addWidget(self.groupBox)
        # Outer columns share the extra horizontal space equally.
        self.horizontalLayout.setStretch(0, 1)
        self.horizontalLayout.setStretch(2, 1)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply all user-visible strings (kept separate for retranslation)."""
        Dialog.setWindowTitle(_translate("Dialog", "Sale", None))
        self.groupBox_2.setTitle(_translate("Dialog", "Inventory", None))
        self.leditInventory.setPlaceholderText(_translate("Dialog", "Search...", None))
        self.label_11.setText(_translate("Dialog", "Client:", None))
        self.label.setText(_translate("Dialog", "Code:", None))
        self.leditCode.setToolTip(_translate("Dialog", "Press Enter to \n"
"search item by code", None))
        self.leditCode.setWhatsThis(_translate("Dialog", "Insert the code item here\n"
"", None))
        self.label_10.setText(_translate("Dialog", "Name:", None))
        self.label_6.setText(_translate("Dialog", "Item Price:", None))
        self.label_2.setText(_translate("Dialog", "Quantity:", None))
        self.label_3.setText(_translate("Dialog", "Margin:", None))
        self.label_8.setText(_translate("Dialog", "Discount:", None))
        self.chkBoxItbms.setText(_translate("Dialog", "Include Tax", None))
        self.label_9.setText(_translate("Dialog", "Total Item Price:", None))
        self.spnBoxTotalItemPrice.setPrefix(_translate("Dialog", "$ ", None))
        self.label_13.setText(_translate("Dialog", "Cost:", None))
        self.chkBoxCredit.setText(_translate("Dialog", "Credit", None))
        self.spnboxCost.setPrefix(_translate("Dialog", "$ ", None))
        self.spnboxPrice.setPrefix(_translate("Dialog", "$ ", None))
        self.spnBoxMargin.setPrefix(_translate("Dialog", "% ", None))
        self.spnboxDiscount.setPrefix(_translate("Dialog", "% ", None))
        self.label_14.setText(_translate("Dialog", "Group:", None))
        self.btnInsert.setText(_translate("Dialog", "Insert", None))
        self.btnUndo.setText(_translate("Dialog", "Undo", None))
        self.label_4.setText(_translate("Dialog", "Subtotal:", None))
        self.spnBoxSubtotal.setPrefix(_translate("Dialog", "$ ", None))
        self.label_7.setText(_translate("Dialog", "Sales Tax:", None))
        self.spnBoxTaxT.setToolTip(_translate("Dialog", "7.00%", None))
        self.spnBoxTaxT.setPrefix(_translate("Dialog", "$ ", None))
        self.label_12.setText(_translate("Dialog", "Discount:", None))
        self.spnBoxDiscountT.setPrefix(_translate("Dialog", "$ ", None))
        self.label_5.setText(_translate("Dialog", "Grand Total:", None))
        self.spnBoxGrandTotal.setToolTip(_translate("Dialog", "SubTotal + \n"
"ITBMS (7.00%)", None))
        self.spnBoxGrandTotal.setPrefix(_translate("Dialog", "$ ", None))
        self.btnConfirm.setText(_translate("Dialog", "Confirm", None))
        self.btnDelete.setText(_translate("Dialog", "Delete Entry", None))
        self.groupBox.setTitle(_translate("Dialog", "Items", None))

import res_rc
/main_login.py
import sys
import sqlite3
from PyQt4 import QtCore, QtGui, QtSql
from gui_login import Ui_Dialog as LoginGui
import master_admin
import mec_login
from mec_login import check_login  # must be imported explicitly


class Login(QtGui.QDialog, LoginGui):
    """Login dialog: validates typed credentials against the local SQLite store."""

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)

        ### functionality ###
        self.btnLogin.clicked.connect(self.start)

        ### database ###
        self.conn = sqlite3.connect(".libra.db")
        self.c = self.conn.cursor()
        mec_login.create_login_table(self.c, self.conn)

        self.show()

    def start(self):
        """Check the typed credentials; accept the dialog on success, clear fields on failure."""
        username = self.leditUser.text()
        password = self.leditPassword.text()
        if check_login(self.c, username, password):
            print("success")
            self.accept()
        else:
            self.leditUser.clear()
            self.leditPassword.clear()
            QtGui.QMessageBox.warning(self, 'Error', 'Incorrect username or password')

    def closeEvent(self, event):
        """Release the database cursor and connection when the dialog closes."""
        print("closing")
        self.c.close()
        self.conn.close()
        event.accept()


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    login = Login()
    if login.exec_() == QtGui.QDialog.Accepted:
        mainwindow = master_admin.Inventory()
        mainwindow.show()
        sys.exit(app.exec_())
/master_admin.py
### std lib ### import sys import sqlite3 import time import os ### PyQt4 ### from PyQt4 import QtCore, QtGui, QtSql ### GUIs ### from gui_inventory import Ui_MainWindow as InventoryGui from gui_purchase import Ui_Dialog as PurchaseGui from gui_sale import Ui_Dialog as SaleGui from gui_client import Ui_Dialog as ClientGui from gui_modify import Ui_Dialog as ModifyGui from gui_move import Ui_Dialog as MoveGui from gui_client_modify import Ui_Dialog as ClientModifyGui import mec_inventory#, stresstest class Inventory (QtGui.QMainWindow, InventoryGui): ### constants ### useNas = False ### change this to use nas DB_LOCATION = ".libra.db" # database def __init__ (self, parent=None): start = time.time() ### sets up visual gui ### QtGui.QMainWindow.__init__(self, parent) # parent shit for exit bug; object hierarchy self.setupUi(self) self.setAttribute(QtCore.Qt.WA_DeleteOnClose) # maybe takes care of closing bug ### Database Connection, for qsqlquerymodel ### self.db = QtSql.QSqlDatabase.addDatabase('QSQLITE') self.db.setDatabaseName(self.DB_LOCATION) self.db.open() ### Table Models ### self.mdlClients = QtSql.QSqlQueryModel() self.mdlPurchases = QtSql.QSqlQueryModel() self.mdlSales = QtSql.QSqlQueryModel() self.mdlInventory = QtSql.QSqlQueryModel() # bal self.mdlPurchasesBal = QtSql.QSqlQueryModel() self.mdlSalesBal = QtSql.QSqlQueryModel() ### sort filter proxy model ### self.proxyInventory = QtGui.QSortFilterProxyModel() self.proxyInventory.setSourceModel(self.mdlInventory) self.proxyPurchases = QtGui.QSortFilterProxyModel() self.proxyPurchases.setSourceModel(self.mdlPurchases) self.proxySales = QtGui.QSortFilterProxyModel() self.proxySales.setSourceModel(self.mdlSales) self.proxyClients = QtGui.QSortFilterProxyModel() self.proxyClients.setSourceModel(self.mdlClients) # bal self.proxyPurchasesBal = QtGui.QSortFilterProxyModel() self.proxyPurchasesBal.setSourceModel(self.mdlPurchasesBal) self.proxySalesBal = QtGui.QSortFilterProxyModel() 
self.proxySalesBal.setSourceModel(self.mdlSalesBal) ### proxy filter parameters self.proxyInventory.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) # case insennsitive self.proxyPurchases.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) self.proxySales.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) self.proxyClients.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) # bal self.proxyPurchasesBal.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) self.proxySalesBal.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) #### setting models to tables ### self.tblInventory.setModel(self.proxyInventory) self.tblPurchases.setModel(self.proxyPurchases) self.tblSales.setModel(self.proxySales) self.tblClients.setModel(self.proxyClients) # bal self.tblPurchasesBal.setModel(self.proxyPurchasesBal) self.tblSalesBal.setModel(self.proxySalesBal) ### Actions functionality ### self.actionRefresh.triggered.connect(self.refreshTables) self.actionPurchase.triggered.connect(self.action_purchase) self.actionSale.triggered.connect(self.action_sale) self.actionClient.triggered.connect(self.action_client) self.btnModifyInventory.clicked.connect(self.modify_inventory) self.btnMove.clicked.connect(self.move_item) self.btnRemovePurchase.clicked.connect(self.remove_purchase) self.btnRemoveSale.clicked.connect(self.reverse_sale) self.btnSettle.clicked.connect(self.settle_debt) self.btnRemoveClient.clicked.connect(self.remove_client) self.btnModifyClient.clicked.connect(self.modify_client) self.leditInventory.textEdited.connect(lambda: self.search(self.leditInventory.text(), self.proxyInventory)) self.leditPurchases.textEdited.connect(lambda: self.search(self.leditPurchases.text(), self.proxyPurchases)) self.leditSales.textEdited.connect(lambda: self.search(self.leditSales.text(), self.proxySales)) self.leditClients.textEdited.connect(lambda: self.search(self.leditClients.text(), self.proxyClients)) self.cmboxInventory.activated.connect(lambda: 
self.combo_box_changed(self.cmboxInventory, self.proxyInventory)) self.cmboxPurchases.activated.connect(lambda: self.combo_box_changed(self.cmboxPurchases, self.proxyPurchases)) self.cmboxSales.activated.connect(lambda: self.combo_box_changed(self.cmboxSales, self.proxySales)) self.cmboxClients.activated.connect(lambda: self.combo_box_changed(self.cmboxClients, self.proxyClients)) self.radioHistoric.toggled.connect(lambda: self.set_balance(self.radioHistoric)) self.radioAnnual.toggled.connect(lambda: self.set_balance(self.radioAnnual)) self.radioMonthly.toggled.connect(lambda: self.set_balance(self.radioMonthly)) self.dateAnnual.dateChanged.connect(lambda: self.set_balance(self.radioAnnual)) self.dateMonthly.dateChanged.connect(lambda: self.set_balance(self.radioMonthly)) self.calBalance.selectionChanged.connect(self.calendar_changed) self.calBalance.showToday() ### Creates tables if not exists, for mec_inventario ### self.conn = sqlite3.connect(self.DB_LOCATION) self.c = self.conn.cursor() mec_inventory.create_tables(self.conn, self.c) ########################## STRESSS TESTTTTTT ################################ #stresstest.test_entries(self.conn, self.c, 10) #stresstest.test_entries(self.conn, self.c, 100) #stresstest.test_entries(self.conn, self.c, 250) #stresstest.test_entries(self.conn, self.c, 500) #stresstest.test_entries(self.conn, self.c, 1000) ################################################################################ self.set_balance(self.radioHistoric) self.refreshTables() headers = ["Code", "Name", "Group", "Available Quantity", "Unit Cost", "Suggested Price", "Minimum Quantity", "Maximum Quantity", "Category"] for i in range(len(headers)): self.mdlInventory.setHeaderData(i, QtCore.Qt.Horizontal, headers[i]) # +1 for id col self.cmboxInventory.addItems(headers) # add headers to combo box headers = ["Date", "Transaction", "Code", "Name", "Group", "Quantity", "Vendor", "Unit Cost", "Total Cost", "Category"] for i in range(len(headers)): 
self.mdlPurchases.setHeaderData(i, QtCore.Qt.Horizontal, headers[i]) self.cmboxPurchases.addItems(headers) headers = ["Date", "Transaction", "Code", "Name", "Group", "Quantity", "Unit Price", "Total Price", "Client", "Pay"] for i in range(len(headers)): self.mdlSales.setHeaderData(i, QtCore.Qt.Horizontal, headers[i]) self.cmboxSales.addItems(headers) headers = ["ID", "Name", "Invested", "Debt", "E-mail", "Phone", "Cellphone"] for i in range(len(headers)): self.mdlClients.setHeaderData(i, QtCore.Qt.Horizontal, headers[i]) self.cmboxClients.addItems(headers) # headers bal headers = ["Date", "Transaction", "Code", "Quantity", "Total Cost"] for i in range(len(headers)): self.mdlPurchasesBal.setHeaderData(i, QtCore.Qt.Horizontal, headers[i]) headers = ["Date", "Transaction", "Code", "Quantity", "Total Price"] for i in range(len(headers)): self.mdlSalesBal.setHeaderData(i, QtCore.Qt.Horizontal, headers[i]) ### table uniform stretch ### self.tblInventory.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) self.tblPurchases.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) self.tblSales.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) self.tblClients.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) # bal stretch self.tblBalance.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch) self.tblBalance.verticalHeader().setResizeMode(QtGui.QHeaderView.Stretch) self.tblPurchasesBal.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) self.tblSalesBal.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) end = time.time() print("constructor time: " + str(end - start)) def refreshTables(self): start = time.time() self.mdlInventory.setQuery("""SELECT code, name, groupx, avail, costUni, priceUniSug, stockmin, stockmax, category FROM Inventory""", self.db) self.mdlPurchases.setQuery("""SELECT dat, trans, code, name, groupx, quantity, provider, costUni, costItems, category FROM Entries""", self.db) 
self.mdlSales.setQuery("""SELECT dat, trans, code, name, groupx, quantity, priceUni, priceItems, client, payment FROM Outs""", self.db) self.mdlClients.setQuery("""SELECT identification, name, money_invested, debt, mail, num, cel FROM Clients""", self.db) # bal tables self.mdlPurchasesBal.setQuery(""" SELECT dat, trans, code, quantity, costItems FROM Entries """, self.db) self.mdlSalesBal.setQuery("""SELECT dat, trans, code, quantity, priceItems FROM Outs""", self.db) end = time.time() print("refresh time: " + str(end - start)) def settle_debt(self): index = self.tblSales.selectionModel().selectedRows() if index: row = int(index[0].row()) # selected row code = self.proxySales.data(self.proxySales.index(row, 1)) # 0 = fecha, 1 = codigo msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Settle", "Are you sure you wish to settle\n" "the debt generated by sale number: " + code + "?", parent=self) btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0)) # yes btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1)) # no msgbox.exec_() if msgbox.clickedButton() == btnYes: mec_inventory.paid(self.conn, self.c, code) QtGui.QMessageBox.information(self, 'Message', "The debt generated by sale number: " + code + "\nhas been settled successfully") self.refreshTables() else: QtGui.QMessageBox.information(self, 'Message', "Please select the sale by\n" + "credit you wish to settle") def calendar_changed(self): start = time.time() self.radioDaily.setChecked(True) date = str(self.calBalance.selectedDate().toPyDate()) self.search(date, self.proxyPurchasesBal) self.search(date, self.proxySalesBal) items = mec_inventory.calc_bal_day(self.c, date[0:4], date[5:7], date[8:10]) self.tblBalance.setItem(0, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[2]))) # ventas contado self.tblBalance.setItem(1, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[3]))) # ventas credito self.tblBalance.setItem(2, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[1]))) # ingreso 
tot self.tblBalance.setItem(3, 1, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[0]))) # costo self.tblBalance.setItem(4, 1, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[5]))) # impuesto self.tblBalance.setItem(5, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[6]))) # ganancia if items[0] != 0: self.tblBalance.setItem(6, 2, QtGui.QTableWidgetItem('% {0:.2f}'.format(items[6]/items[0] * 100))) else: self.tblBalance.setItem(6, 2, QtGui.QTableWidgetItem('% 0.00')) end = time.time() print("cal: " + str(end - start)) def set_balance(self, radioButton): start = time.time() if radioButton.isChecked(): items = [] if radioButton == self.radioHistoric: self.search("", self.proxyPurchasesBal) self.search("", self.proxySalesBal) items = mec_inventory.calc_bal_his(self.c) # [costoTot,precioTot,cd,cc,ingresoTot,impuestoTot,gananciaTot] elif radioButton == self.radioAnnual: date = str(self.dateAnnual.date().toPyDate()) self.search(date[0:4], self.proxyPurchasesBal) self.search(date[0:4], self.proxySalesBal) items = mec_inventory.calc_bal_year(self.c, date[0:4]) # [costoTot,precioTot,cd,cc,ingresoTot,impuestoTot,gananciaTot] else: # radio mensual date = str(self.dateMonthly.date().toPyDate()) self.search((date[0:4] + "-" + date[5:7]), self.proxyPurchasesBal) self.search((date[0:4] + "-" + date[5:7]), self.proxySalesBal) items = mec_inventory.calc_bal_mes(self.c, date[0:4], date[5:7]) # [costoTot,precioTot,cd,cc,ingresoTot,impuestoTot,gananciaTot] self.tblBalance.setItem(0, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[2]))) self.tblBalance.setItem(1, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[3]))) self.tblBalance.setItem(2, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[1]))) self.tblBalance.setItem(3, 1, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[0]))) self.tblBalance.setItem(4, 1, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[5]))) self.tblBalance.setItem(5, 2, QtGui.QTableWidgetItem('$ {0:.2f}'.format(items[6]))) if items[0] != 0: 
self.tblBalance.setItem(6, 2, QtGui.QTableWidgetItem('% {0:.2f}'.format(items[6]/items[0] * 100))) else: self.tblBalance.setItem(6, 2, QtGui.QTableWidgetItem('% 0.00')) end = time.time() print("bal: " + str(end - start)) def combo_box_changed(self, comboBox, proxy): proxy.setFilterKeyColumn(comboBox.currentIndex()) def search(self, text, proxy): proxy.setFilterRegExp("^" + text) def move_item(self): index = self.tblInventory.selectionModel().selectedRows() ### list of indexes if index: row = int(index[0].row()) # selected row code = self.proxyInventory.data(self.proxyInventory.index(row, 0)) group = self.proxyInventory.data(self.proxyInventory.index(row, 2)) available = self.proxyInventory.data(self.proxyInventory.index(row, 3)) move = Move(code, available, group, self) move.show() else: QtGui.QMessageBox.information(self, 'Message', "Please select the \n" + "item you wish to move") def modify_inventory(self): index = self.tblInventory.selectionModel().selectedRows() ### list of indexes if index: row = int(index[0].row()) # selected row code = self.proxyInventory.data(self.proxyInventory.index(row, 0)) group = self.proxyInventory.data(self.proxyInventory.index(row, 2)) modifyInventory = ModifyInventory(code, group, self) modifyInventory.show() self.tblInventory.clearSelection() # clear choice else: QtGui.QMessageBox.information(self, 'Message', "Please select the \n" + "item you wish to modify") def remove_client(self): index = self.tblClients.selectionModel().selectedRows() if index: row = int(index[0].row()) # selected row name = self.proxyClients.data(self.proxyClients.index(row, 1)) # 0 = fecha, 1 = codigo msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Delete", "Are you sure you want to delete: " + name + "?", parent=self) btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0)) # yes btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1)) # no msgbox.exec_() if msgbox.clickedButton() == btnYes: if 
mec_inventory.del_client_name(self.conn, self.c, name): self.refreshTables() # refresh QtGui.QMessageBox.information(self, 'Message', "The client: " + name + "\nhas been deleted sucessfully") else: QtGui.QMessageBox.critical(self, 'Error', 'An unexpected error has occurred.\n'+ 'Please try again.') self.tblClients.clearSelection() # clear choice else: QtGui.QMessageBox.information(self, 'Message', "Please select the \n" + "client you wish to delete") def modify_client(self): index = self.tblClients.selectionModel().selectedRows() if index: row = int(index[0].row()) # selected row name = self.proxyClients.data(self.proxyClients.index(row, 1)) # 0 = fecha, 1 = codigo modifyClient = ModifyClient(name, self) modifyClient.show() else: QtGui.QMessageBox.information(self, 'Message', "Please select the \n" + "client you wish to modify") def remove_purchase(self): index = self.tblPurchases.selectionModel().selectedRows() if index: row = int(index[0].row()) # selected row code = self.proxyPurchases.data(self.proxyPurchases.index(row, 1)) # 0 = fecha, 1 = codigo msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Delete", "Are you sure you want to delete purchase\n" " number: " + code + "?", parent=self) btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0)) # yes btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1)) # no msgbox.exec_() if msgbox.clickedButton() == btnYes: if mec_inventory.del_general(self.conn, self.c, code): self.refreshTables() # refresh QtGui.QMessageBox.information(self, 'Message', "Purchase number: " + code + "\nhas been deleted successfully.\n" + "Inventory must be reduced manually") else: QtGui.QMessageBox.critical(self, 'Error', 'An unexpected error has occurred.\n'+ 'Please try again.') self.tblPurchases.clearSelection() # clear choice else: QtGui.QMessageBox.information(self, 'Message', "Please select the\n" + "purchase that you want to delete") def reverse_sale(self): index = self.tblSales.selectionModel().selectedRows() if 
index: row = int(index[0].row()) # selected row code = self.proxySales.data(self.proxySales.index(row, 1)) # 0 = fecha, 1 = codigo msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Reverse", "Are you sure you want to reverse\n" "purchase number: " + code + "?", parent=self) btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0)) # yes btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1)) # no msgbox.exec_() if msgbox.clickedButton() == btnYes: if mec_inventory.del_general(self.conn, self.c, code): self.refreshTables() # refresh QtGui.QMessageBox.information(self, 'Message', "Purchase number: " + code + "\nhas been reversed successfully") else: QtGui.QMessageBox.critical(self, 'Error', 'An unexpected error has occurred.\n'+ 'Please try again.') self.tblSales.clearSelection() # clear choice else: QtGui.QMessageBox.warning(self, 'Message', "Please select the\n" + "purchase you want to reverse") def action_client(self): client = Client(self) client.show() def action_sale(self): sale = Sale(self) sale.show() def action_purchase(self): purchase = Purchase(self) purchase.show() def closeEvent(self,event): msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Warning", "Are you sure you want to exit?", parent=self) btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0)) # yes btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1)) # no msgbox.exec_() if msgbox.clickedButton() == btnYes: self.db.close() self.c.close() self.conn.close() event.accept() else: event.ignore() class Purchase(QtGui.QDialog, PurchaseGui): def __init__ (self, parent=None): QtGui.QDialog.__init__(self, parent) self.setupUi(self) ### functionality ### self.btnAdd.clicked.connect(self.add) self.btnUndo.clicked.connect(self.undo) self.spnboxMargin.valueChanged.connect(self.margin_changed) self.spnboxPrice.valueChanged.connect(self.price_changed) self.spnboxCost.valueChanged.connect(self.cost_changed) ### connection, from parent ####### self.conn = 
self.parent().conn self.c = self.parent().c ### combo box categoria config self.cmboxCategory.addItems(mec_inventory.unique(self.c, "category", "Inventory")) self.cmboxCategory.completer().setCompletionMode(QtGui.QCompleter.PopupCompletion) ### code combo box ### self.cmBoxCode.addItems(mec_inventory.unique(self.c, "code", "Inventory")) self.cmBoxCode.completer().setCompletionMode(QtGui.QCompleter.PopupCompletion) self.cmBoxCode.setEditText("") self.cmBoxCode.activated.connect(self.code_return) self.cmboxGroup.activated.connect(self.group_return) self.code = "" # controlling multiple code input def cost_changed(self): self.spnboxMargin.setValue(0) self.spnboxPrice.setValue(0) def price_changed(self): cost = self.spnboxCost.value() if cost > 0: price = self.spnboxPrice.value() margin = (price/cost - 1) * 100 self.spnboxMargin.setValue(margin) def margin_changed(self): margin = self.spnboxMargin.value() cost = self.spnboxCost.value() price = cost * (1 + margin/100) self.spnboxPrice.setValue(price) def code_return(self): code = self.cmBoxCode.currentText() if self.code != code: self.cmboxGroup.clear() self.cmboxGroup.addItems(mec_inventory.unique(self.c, "group", "inventory", "code", code)) self.code = code self.group_return() def group_return(self): code = self.cmBoxCode.currentText() group = self.cmboxGroup.currentText() query = mec_inventory.query_add(self.c, code, group) ### temp error if query: self.leditName.setText(query[0]) # nombre self.spnboxCost.setValue(query[1]) # costo self.spnboxPrice.setValue(query[2]) # precio sugerido self.cmboxCategory.setEditText(query[3]) # categoria self.spnBoxMin.setValue(query[4]) # min self.spnBoxMax.setValue(query[5]) # max self.price_changed() else: QtGui.QMessageBox.information(self, 'Message', ' No previous records of this code have\n'+ 'been found. 
New records will be created.') def undo(self): self.leditName.clear() self.spnboxCost.setValue(0) self.spnBoxQuantity.setValue(1) self.spnboxMargin.setValue(0) self.spnboxPrice.setValue(0) self.cmboxCategory.clearEditText() self.cmboxGroup.clearEditText() self.leditVendor.clear() self.spnBoxMin.setValue(1) self.spnBoxMax.setValue(100) self.cmBoxCode.clearEditText() def add(self): code = self.cmBoxCode.currentText() name = self.leditName.text().capitalize() if code != "" and name != "": msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Purchase", "Are you sure you want to\n" "store this purchase?", parent=self) btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0)) # yes btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1)) # no msgbox.exec_() if msgbox.clickedButton() == btnYes: start = time.time() cost = self.spnboxCost.value() margin = self.spnboxMargin.value() price = self.spnboxPrice.value() quantity = self.spnBoxQuantity.value() group = self.cmboxGroup.currentText() cat = self.cmboxCategory.currentText().capitalize() vendor = self.leditVendor.text().capitalize() stockMin = self.spnBoxMin.value() stockMax = self.spnBoxMax.value() ### anadiendo ### succesful = mec_inventory.add_item_entry(self.conn, self.c, code, name, quantity, vendor, cost, price, group, cat, stockMin, stockMax) if succesful: self.parent().refreshTables() self.undo() # this has to go after refresh QtGui.QMessageBox.information(self, 'Message', 'This purchase has been\n'+ 'regstered successfully') self.close() else: QtGui.QMessageBox.critical(self, 'Error', 'An unexpected error occurred.\n'+ 'Please try again') end = time.time() print("compra time: " + str(end-start)) elif code == "": QtGui.QMessageBox.warning(self, 'Warning', 'Please enter a code') else: # nombre == "" QtGui.QMessageBox.warning(self, 'Warning', 'Please enter a name') class Sale(QtGui.QDialog, SaleGui): def __init__(self, parent=None): QtGui.QDialog.__init__(self, parent) self.setupUi(self) ### 
functionality ### self.btnInsert.clicked.connect(self.add) self.btnUndo.clicked.connect(self.undo) self.btnConfirm.clicked.connect(self.confirm) self.btnDelete.clicked.connect(self.delete_entry) self.spnboxPrice.valueChanged.connect(self.price_changed) self.spnBoxMargin.valueChanged.connect(self.margin_changed) self.spnBoxQuantity.valueChanged.connect(self.quantity_changed) self.tblInventory.clicked.connect(self.table_clicked) ### combo box nombre config ### self.cmboxClient.setModel(self.parent().mdlClients) self.cmboxClient.setModelColumn(1) self.cmboxClient.completer().setCompletionMode(QtGui.QCompleter.PopupCompletion) self.cmboxClient.setEditText("") ### table ### self.model = QtGui.QStandardItemModel() self.model.setColumnCount(5) header = ["Code", "Name", "Item Price", "Quantity", "Total Price"] self.model.setHorizontalHeaderLabels(header) self.tblItems.setModel(self.model) ### abstract table / list of lists ### self.abstractTable = [] ### mini innventario ### self.mdlInventory = QtSql.QSqlQueryModel() self.proxyInventory = QtGui.QSortFilterProxyModel() self.proxyInventory.setSourceModel(self.mdlInventory) self.tblInventory.setModel(self.proxyInventory) self.refresh_inventory() header = ["Code", "Name", "Available", "Group"] for i in range(len(header)): self.mdlInventory.setHeaderData(i, QtCore.Qt.Horizontal, header[i]) self.cmboxInventory.addItems(header) # add headers to combo box self.tblInventory.horizontalHeader().setResizeMode(QtGui.QHeaderView.Interactive) # search funnctionality self.cmboxInventory.activated.connect(self.combo_box_changed) self.leditInventory.textChanged.connect(lambda: self.search(self.leditInventory.text())) self.proxyInventory.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive) # case insennsitive ### sqlite 3 connection from parent ### self.conn = self.parent().conn self.c = self.parent().c def combo_box_changed(self): self.proxyInventory.setFilterKeyColumn(self.cmboxInventory.currentIndex()) def search(self, text): 
self.proxyInventory.setFilterRegExp("^" + text) def refresh_inventory(self): self.mdlInventory.setQuery("""SELECT code, name, avail, groupx FROM Inventory""", self.parent().db) # uses parent connection def table_clicked(self): self.spnBoxQuantity.setValue(1) # reset cantidad index = self.tblInventory.selectionModel().selectedRows() ### list of indexes row = int(index[0].row()) # selected row code = self.proxyInventory.data(self.proxyInventory.index(row, 0)) group = self.proxyInventory.data(self.proxyInventory.index(row, 3)) query = mec_inventory.query_sale(self.c, code, group) if query: self.leditCode.setText(code) # arg self.leditName.setText(query[0]) self.leditGroup.setText(group) self.spnboxPrice.setValue(query[1]) self.spnboxCost.setValue(query[2]) self.price_changed() else: QtGui.QMessageBox.critical(self, 'Error', "An unexpected error has occurred.\n" + "Please try again") self.refresh_inventory() def margin_changed(self): price = (1 + (self.spnBoxMargin.value() / 100)) * self.spnboxCost.value() self.spnboxPrice.setValue(price) self.quantity_changed() def quantity_changed(self): priceTotalItem = self.spnboxPrice.value() * self.spnBoxQuantity.value() self.spnBoxTotalItemPrice.setValue(priceTotalItem) def refreshTotals(self): if self.abstractTable: taxes = 0.0 discounts = 0.0 subtotal = 0.0 for line in self.abstractTable: taxes += line[2] * line[3] * line[1] # impuesto * precio * cantidad discounts += (1 + line[2]) * line [3] * line[4] * line[1] # (1 + impuesto) * precio * desc * cant subtotal += line[3] * line[1] # precio * cantidad self.spnBoxSubtotal.setValue(subtotal) self.spnBoxTaxT.setValue(taxes) self.spnBoxDiscountT.setValue(discounts) self.spnBoxGrandTotal.setValue(subtotal + taxes - discounts) else: self.spnBoxSubtotal.setValue(0) self.spnBoxTaxT.setValue(0) self.spnBoxDiscountT.setValue(0) self.spnBoxGrandTotal.setValue(0) def delete_entry(self): index = self.tblItems.selectionModel().selectedRows() ### list of indexes if (index): row = 
int(index[0].row()) # selected row self.model.removeRow(row) if row == 0: self.cmboxClient.setEnabled(True) del self.abstractTable[row] # deletes from abstract table self.refreshTotals() self.tblItems.clearSelection() else: QtGui.QMessageBox.information(self, 'Message', 'Please select the line\n' + 'you wish to remove') def price_changed(self): if self.spnboxCost.value() > 0: margin = (self.spnboxPrice.value() / self.spnboxCost.value()) * 100 - 100 self.spnBoxMargin.setValue(margin) # sets margin self.quantity_changed() def undo (self): self.leditCode.clear() self.leditName.clear() self.leditGroup.clear() self.spnboxCost.setValue(0) self.spnboxPrice.setValue(0) self.spnBoxQuantity.setValue(1) self.spnBoxMargin.setValue(0) self.spnboxDiscount.setValue(0) self.chkBoxItbms.setChecked(True) self.chkBoxCredit.setChecked(False) self.spnBoxTotalItemPrice.setValue(0.00) def add(self): ### table view ### code = self.leditCode.text() if code != "": client = self.cmboxClient.currentText() quantity = self.spnBoxQuantity.value() group = self.leditGroup.text() error = mec_inventory.sale_valid(self.c, code, client, quantity, group) # returns list of errors if not error: ### shopping cart table ### line = [] line.append(QtGui.QStandardItem(self.leditCode.text())) line.append(QtGui.QStandardItem(self.leditName.text())) line.append(QtGui.QStandardItem(self.spnboxPrice.text())) line.append(QtGui.QStandardItem(self.spnBoxQuantity.text())) line.append(QtGui.QStandardItem(self.spnBoxTotalItemPrice.text())) self.model.appendRow(line) ### abstract table ### line = [] line.append(self.leditCode.text()) # 0 line.append(quantity) # 1 line.append(float(0.07 if self.chkBoxItbms.isChecked() else 0.0)) # 2 line.append(self.spnboxPrice.value()) # 3 line.append(self.spnboxDiscount.value() / 100) # 4 # percentage line.append("CRE" if self.chkBoxCredit.isChecked() else "DEB") # 5 line.append(self.cmboxClient.currentText()) # 6 line.append(self.leditGroup.text()) # 7 self.abstractTable.append(line) 
# NOTE(review): this chunk begins mid-way through Sell's sale-handling method;
# the method header and the definitions of `error`, `client` and `code` are
# above this excerpt, so the indentation below is reconstructed.
                self.refreshTotals()
                self.undo()
                self.cmboxClient.setEnabled(False)  # disable edit client
            elif 3 in error:  # error code for missing client
                QtGui.QMessageBox.information(self, 'Message',
                                              'No previous records of this client\n' +
                                              'have been found. Please create it')
                # Open the "new client" dialog pre-filled with the typed name.
                newClient = Client(self.parent())
                newClient.leditName.setText(client)
                newClient.show()
            elif 2 in error:  # error code for insufficient stock
                QtGui.QMessageBox.warning(self, 'Warning',
                                          'The item quantity you wish to sell\n' +
                                          'is not available in your inventory')
            else:
                QtGui.QMessageBox.critical(self, 'Error',
                                           'An unexpected error has occurred.\n' +
                                           'Please try again')
            self.refresh_inventory()
        else:  # code == ""
            QtGui.QMessageBox.warning(self, 'Error', 'Please select\n' +
                                      'an inventory item')

    def confirm(self):
        """Ask for confirmation, then commit the whole cart through
        mec_inventory.shopping_cart and reset the sale form."""
        if self.abstractTable:
            msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Sell",
                                       "Are you sure you\n"
                                       "want to make this sale?", parent=self)
            btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0))  # yes
            btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1))  # no
            msgbox.exec_()
            if msgbox.clickedButton() == btnYes:
                start = time.time()
                # shopping_cart returns True on success, or a dict of
                # per-line error codes on validation failure.
                if mec_inventory.shopping_cart(self.conn, self.c, self.abstractTable):
                    self.parent().refreshTables()
                    # Empty the cart and its table model row by row.
                    del self.abstractTable[:]
                    for i in range(self.model.rowCount()):
                        self.model.removeRow(0)
                    self.refreshTotals()
                    self.cmboxClient.clearEditText()
                    self.undo()
                    self.cmboxClient.setEnabled(True)
                    end = time.time()
                    print("time venta: " + str(end - start))
                    QtGui.QMessageBox.information(self, 'Message',
                                                  'The transaction has been\n' +
                                                  'registered successfully')
                else:
                    QtGui.QMessageBox.critical(self, 'Error',
                                               'An unexpected error has occurred.\n' +
                                               'Please try again')
                self.refresh_inventory()  # regardless of success or not
        else:
            QtGui.QMessageBox.warning(self, 'Warning', 'Please insert an item\n' +
                                      'to be sold')


class Client(QtGui.QDialog, ClientGui):
    """Dialog used to register a brand-new client."""

    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        ### functionality ###
        self.btnUndo.clicked.connect(self.undo)
        self.btnAdd.clicked.connect(self.anadir)
        ### validators ###
        regexpPhone = QtCore.QRegExp("^[0-9-()]*$")  # 0-9 or - or ()
        phoneVal = QtGui.QRegExpValidator(regexpPhone)
        self.leditPhone.setValidator(phoneVal)
        self.leditCellphone.setValidator(phoneVal)
        self.leditFax.setValidator(phoneVal)
        ### connection, from parent ###
        self.conn = self.parent().conn
        self.c = self.parent().c

    def anadir(self):
        """Validate the form and add the client after a confirmation
        dialog ("anadir" is Spanish for "add")."""
        name = self.leditName.text().title()
        if name != "":
            msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Add Client",
                                       "Are you sure you want to\n"
                                       "add this client?", parent=self)
            btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0))  # yes
            btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1))  # no
            msgbox.exec_()
            if msgbox.clickedButton() == btnYes:
                start = time.time()
                id = self.leditID.text()
                phone = self.leditPhone.text()
                cellphone = self.leditCellphone.text()
                address = self.leditAddress.text().capitalize()
                email = self.leditEmail.text()
                fax = self.leditFax.text()
                # add_client returns False when the name already exists.
                if mec_inventory.add_client(self.conn, self.c, id, name, email,
                                            phone, cellphone, fax, address):
                    self.parent().refreshTables()
                    self.undo()
                    QtGui.QMessageBox.information(self, 'Message',
                                                  'The client has been\n' +
                                                  'added successfully')
                else:
                    QtGui.QMessageBox.warning(self, 'Error',
                                              'The client that you are trying\n' +
                                              'to add already exists')
                end = time.time()
                print("time cliente: " + str(end - start))
        else:  # nombre == ""
            QtGui.QMessageBox.warning(self, 'Warning', 'Please insert a name')

    def undo(self):
        """Clear every field of the form."""
        self.leditName.clear()
        self.leditID.clear()
        self.leditPhone.clear()
        self.leditCellphone.clear()
        self.leditAddress.clear()
        self.leditFax.clear()
        self.leditEmail.clear()


class ModifyInventory(QtGui.QDialog, ModifyGui):
    """Dialog for editing one inventory item (a code within a group)."""

    def __init__(self, code, group, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        # parent connection
        self.conn = self.parent().conn
        self.c = self.parent().c
        self.leditCode.setText(code)
        self.cmboxGroup.addItem(group)
        self.cmboxGroup.addItem("Global")  # "Global" applies the edit to every group
        items = mec_inventory.query_modify(self.c, code, group)
        # Returns [avail, priceUniSug, costUni, category, stockmin, stockmax, name]
        if items:
            # Remember the loaded values so undo() can restore them.
            self.available = items[0]
            self.price = items[1]
            self.cost = items[2]
            self.category = items[3]
            self.min = items[4]
            self.max = items[5]
            self.name = items[6]
            self.spnboxAvailable.setValue(self.available)
            self.spnboxPrice.setValue(self.price)
            self.spnboxCost.setValue(self.cost)
            self.cmboxCategory.setEditText(self.category)
            self.spnboxMin.setValue(self.min)
            self.spnboxMax.setValue(self.max)
            self.leditName.setText(self.name)
            # Margin (%) derived from price over cost.
            self.spnboxMargin.setValue(((self.price / self.cost) - 1) * 100)
        ### functionality ###
        self.btnModify.clicked.connect(self.modify_inventory)
        self.btnUndo.clicked.connect(self.undo)
        self.spnboxMargin.valueChanged.connect(self.margin_changed)
        self.spnboxPrice.valueChanged.connect(self.price_changed)
        self.spnboxCost.valueChanged.connect(self.cost_changed)

    def modify_inventory(self):
        """Persist the edited values after user confirmation."""
        msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Modify",
                                   "Are you sure you want\n"
                                   "to modify this item?", parent=self)
        btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0))  # yes
        btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1))  # no
        msgbox.exec_()
        if msgbox.clickedButton() == btnYes:
            start = time.time()
            code = self.leditCode.text()
            name = self.leditName.text()
            cost = self.spnboxCost.value()
            margin = self.spnboxMargin.value()
            price = self.spnboxPrice.value()
            available = self.spnboxAvailable.value()
            group = self.cmboxGroup.currentText()
            cat = self.cmboxCategory.currentText().capitalize()
            stockMin = self.spnboxMin.value()
            stockMax = self.spnboxMax.value()
            ### modifying ###
            mec_inventory.modify(self.conn, self.c, code, group, available, price,
                                 cat, stockMin, stockMax, cost, name)
            self.parent().refreshTables()
            QtGui.QMessageBox.information(self, 'Message',
                                          'The modification has been\n' +
                                          'registered successfully')
            self.close()
            end = time.time()
            print("modificar time: " + str(end - start))

    def cost_changed(self):
        """Changing the cost invalidates margin and price; reset both."""
        self.spnboxMargin.setValue(0)
        self.spnboxPrice.setValue(0)

    def price_changed(self):
        """Recompute the margin (%) from the new price, when cost is set."""
        cost = self.spnboxCost.value()
        if cost > 0:
            price = self.spnboxPrice.value()
            margin = (price / cost - 1) * 100
            self.spnboxMargin.setValue(margin)

    def margin_changed(self):
        """Recompute the price from cost and the new margin (%)."""
        margin = self.spnboxMargin.value()
        cost = self.spnboxCost.value()
        price = cost * (1 + margin / 100)
        self.spnboxPrice.setValue(price)

    def undo(self):
        """Restore the values loaded when the dialog was opened."""
        self.leditName.setText(self.name)
        self.spnboxCost.setValue(self.cost)
        self.spnboxAvailable.setValue(self.available)
        self.spnboxMargin.setValue((self.price / self.cost - 1) * 100)
        self.spnboxPrice.setValue(self.price)
        self.cmboxCategory.setEditText(self.category)
        self.cmboxGroup.setCurrentIndex(0)
        self.spnboxMin.setValue(self.min)
        self.spnboxMax.setValue(self.max)


class ModifyClient(QtGui.QDialog, ClientModifyGui):
    """Dialog for editing an existing client, looked up by name."""

    def __init__(self, name, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.leditName.setText(name)
        # functionality
        self.btnUndo.clicked.connect(self.undo)
        self.btnModify.clicked.connect(self.modify)
        ### validators ###
        regexpPhone = QtCore.QRegExp("^[0-9-()]*$")  # 0-9 or - or ()
        phoneVal = QtGui.QRegExpValidator(regexpPhone)
        self.leditPhone.setValidator(phoneVal)
        self.leditCellphone.setValidator(phoneVal)
        self.leditFax.setValidator(phoneVal)
        ### connection, from parent ###
        self.conn = self.parent().conn
        self.c = self.parent().c
        info = mec_inventory.query_client(self.c, name)
        # info = [identification, mail, num, cel, fax, direction]
        if info:
            # Keep the loaded values so undo() can restore them.
            self.id = info[0]
            self.email = info[1]
            self.phone = info[2]
            self.cel = info[3]
            self.fax = info[4]
            self.address = info[5]
            self.leditName.setText(name)
            self.leditID.setText(info[0])
            self.leditEmail.setText(info[1])
            self.leditPhone.setText(info[2])
            self.leditCellphone.setText(info[3])
            self.leditFax.setText(info[4])
            self.leditAddress.setText(info[5])
        else:
            QtGui.QMessageBox.warning(self, 'Error',
                                      'An unexpected error has occurred.\n' +
                                      'Please try again')
            self.close()

    def modify(self):
        """Persist the edited client data after confirmation."""
        msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Add Client",
                                   "Are you sure you want\n"
                                   "to modify this client?", parent=self)
        btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0))  # yes
        btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1))  # no
        msgbox.exec_()
        if msgbox.clickedButton() == btnYes:
            start = time.time()
            name = self.leditName.text()
            id = self.leditID.text()
            phone = self.leditPhone.text()
            cell = self.leditCellphone.text()
            address = self.leditAddress.text().capitalize()
            email = self.leditEmail.text()
            fax = self.leditFax.text()
            mec_inventory.modify_client(self.conn, self.c, name, id, email,
                                        phone, cell, fax, address)
            self.parent().refreshTables()
            QtGui.QMessageBox.information(self, 'Message',
                                          'The client has been\n' +
                                          'modified successfully')
            self.close()
            end = time.time()
            print("time mod cliente: " + str(end - start))

    def undo(self):
        """Restore the values loaded when the dialog was opened."""
        self.leditID.setText(self.id)
        self.leditPhone.setText(self.phone)
        self.leditCellphone.setText(self.cel)
        self.leditAddress.setText(self.address)
        self.leditFax.setText(self.fax)
        self.leditEmail.setText(self.email)


class Move(QtGui.QDialog, MoveGui):
    """Dialog for moving stock of one item between groups."""

    def __init__(self, code, available, group, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.setupUi(self)
        self.conn = self.parent().conn
        self.c = self.parent().c
        self.leditCode.setText(code)
        self.spnboxQuantity.setMaximum(available)  # cannot move more than is in stock
        self.leditFromGroup.setText(str(group))
        # Offer every group that already holds this code, minus the source group.
        self.cmboxToGroup.addItems(mec_inventory.unique(self.c, "groupx",
                                                        "inventory", "code", code))
        self.cmboxToGroup.removeItem(self.cmboxToGroup.findText(group))
        self.btnConfirm.clicked.connect(self.confirm)

    def confirm(self):
        """Move the chosen quantity between groups after confirmation."""
        msgbox = QtGui.QMessageBox(QtGui.QMessageBox.Icon(4), "Sell",
                                   "Are you sure you want to\n"
                                   "move this item?", parent=self)
        btnYes = msgbox.addButton("Yes", QtGui.QMessageBox.ButtonRole(0))  # yes
        btnNo = msgbox.addButton("No", QtGui.QMessageBox.ButtonRole(1))  # no
        msgbox.exec_()
        if msgbox.clickedButton() == btnYes:
            code = self.leditCode.text()
            quantity = self.spnboxQuantity.value()
            fromGroup = self.leditFromGroup.text()
            toGroup = self.cmboxToGroup.currentText()
            print(str(code) + str(quantity) + str(fromGroup) + str(toGroup))
            mec_inventory.move(self.conn, self.c, code, fromGroup, toGroup, quantity)
            self.parent().refreshTables()
            QtGui.QMessageBox.information(self, 'Message',
                                          'The operation has been \n' +
                                          'made successfully')
            self.close()


##################### starts everything #############################################
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    inventory = Inventory()  # delete this (original note: "borrar esto")
    inventory.show()
    # If startup should be conditioned on the NAS location being reachable
    # (original note: "si se va a condicionar al nas location"):
    # if os.path.isdir("\\\\NASPAREDES\\db"):
    #     inventario = Inventario()
    #     inventario.show()
    # else:
    #     widget = QtGui.QWidget()
    #     QtGui.QMessageBox.warning(widget, 'Error de conexin', 'Necesitamos que este conectado a\n' +
    #                               'la red wifi')
    sys.exit(app.exec_())
/mec_inventory.py
""" Author:Christopher Holder """ def create_tables(connection,cursor): """ This function creates the neccessary tables in the database. """ cursor.execute("CREATE TABLE IF NOT EXISTS OrdinalNumber(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,num TEXT NOT NULL)") cursor.execute('CREATE TABLE IF NOT EXISTS OrdinalNumberS(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, num TEXT NOT NULL)') cursor.execute("""CREATE TABLE IF NOT EXISTS Inventory(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,code TEXT NOT NULL,name TEXT NOT NULL,avail INTEGER NOT NULL,costUni REAL NOT NULL,priceUniSug REAL NOT NULL,groupx TEXT NOT NULL,category TEXT,stockmin INTEGER,stockmax INTEGER)""") cursor.execute("""CREATE TABLE IF NOT EXISTS Entries(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,dat TEXT,trans TEXT,code TEXT NOT NULL,name TEXT NOT NULL,quantity INTEGER NOT NULL,provider TEXT ,costUni REAL NOT NULL,costItems REAL NOT NULL,groupx TEXT NOT NULL, category TEXT)""") cursor.execute("""CREATE TABLE IF NOT EXISTS Outs(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,dat TEXT,trans TEXT,code TEXT NOT NULL,name TEXT NOT NULL,quantity INTEGER NOT NULL,groupx TEXT NOT NULL,priceUni REAL,priceItems REAL,tax REAL,revenue REAL,winnings REAL,payment TEXT,client TEXT)""") cursor.execute('CREATE TABLE IF NOT EXISTS Clients(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,identification TEXT,name TEXT,mail TEXT,num TEXT,cel TEXT,fax TEXT ,direction TEXT,bought INTEGER,money_invested REAL,paid REAL,debt REAL)') add_client(connection,cursor,'Misc','','','','','','') connection.commit() return True def add_item_entry(connection,cursor,code = '#',name = "",quantity = 0,provider = "",costUni = 0.00,priceUniSug = 100 ,groupx = '',category = "",stockmin = "",stockmax = ""): """ This function adds entries to the table Inventory and Entries. """ cursor.execute('SELECT code,groupx FROM Inventory WHERE code=? 
AND groupx = ?',(code,groupx)) data = cursor.fetchone() if data == None: transnum = ordinal_generator(connection,cursor) avail = quantity costItems = costUni * quantity costItems = round(costItems,2) priceUniSug = round(priceUniSug,2) costUni = round(costUni,2) b = (code,name,avail,costUni,priceUniSug,groupx,category,stockmin,stockmax) c = (transnum,code,name,quantity,provider,costUni,costItems,groupx,category) cursor.execute("INSERT INTO Inventory (code,name,avail,costUni,priceUniSug,groupx,category,stockmin,stockmax) VALUES(?,?,?,?,?,?,?,?,?)",b) cursor.execute("INSERT INTO Entries (dat,trans,code,name,quantity,provider,costUni,costItems,groupx,category) VALUES(date('now'),?,?,?,?,?,?,?,?,?)",c) connection.commit() else: transnum = ordinal_generator(connection,cursor) avail = quantity costItems = costUni * quantity costItems = round(costItems,2) c = (transnum,code,name,quantity,provider,round(costUni,2),costItems,groupx,category) #------------------------------------------------------------------------------------------------------- increase_stock(cursor,code,groupx,quantity) update_all(cursor,code,groupx,costUni,priceUniSug,name,category) #------------------------------------------------------------------------------------------------------- cursor.execute("INSERT INTO Entries (dat,trans,code,name,quantity,provider,costUni,costItems,groupx,category) VALUES(date('now'),?,?,?,?,?,?,?,?,?)",c) connection.commit() return True def add_item_exit_fixed(connection,cursor,code = "#",quantity = 1,tax = 0.07,pricef = 10.00,discount = 0,payment = 'CRE',client = '',trans='',groupx = ''): a =(code,groupx) cursor.execute('SELECT name FROM Inventory WHERE code = ? 
AND groupx = ?',a) data0 = cursor.fetchone() name = str(data0[0]) decrease_stock(cursor,code,groupx,quantity) priceUni = pricef taxTot = tax * priceUni * quantity taxTot = round(taxTot,2) priceItems = priceUni * (tax + 1) * quantity if (discount == 0): priceItems = round(priceItems,2) else: discount = priceItems * discount priceItems = priceItems - discount priceItems = round(priceItems,2) cursor.execute('SELECT costUni FROM Inventory WHERE code = ? AND groupx = ?',a) data2 = cursor.fetchone() costItems = (float(data2[0]))* quantity costItems = round(costItems,2) revenue = priceItems - costItems revenue = round(revenue,2) winnings = revenue - taxTot winnings = round(winnings,2) auto_del_0(connection,cursor) b = (trans,code,name,quantity,groupx,priceUni,priceItems,taxTot,revenue,winnings,payment,client) cursor.execute("INSERT INTO Outs (dat,trans,code,name,quantity,groupx,priceUni,priceItems,tax,revenue,winnings,payment,client) VALUES(date('now'),?,?,?,?,?,?,?,?,?,?,?,?)",b) update_client_info(connection,cursor,client) connection.commit() #------------------------------------------------------------------------------------------------------- return True #------------------------------------------------------------------------------------------------------- def modify(connection,cursor,code,groupx,avail,priceUni,category,smin,smax,costUni, name): if (groupx == 'Global'): cursor.execute('UPDATE Inventory SET name = ?,priceUniSug = ?,category = ?, stockmin = ?,stockmax = ? ,costUni = ? WHERE code = ?',(name,priceUni,category,smin,smax,costUni,code)) else: cursor.execute('UPDATE Inventory SET name = ?,avail = ?,priceUniSug = ?,category = ?, stockmin = ?,stockmax = ? ,costUni = ? WHERE code = ? 
AND groupx = ?',(name,avail,priceUni,category,smin,smax,costUni,code,groupx)) connection.commit() def modify_client(connection,cursor,name,identification,mail,num,cel,fax,direction): sel = (identification,mail,num,cel,fax,direction,name) cursor.execute('UPDATE Clients SET identification = ?,mail = ?,num = ?,cel = ?,fax = ?,direction = ? WHERE name = ?',sel) connection.commit() def move(connection,cursor,code,groupx1,groupx2,quantity): cursor.execute('SELECT code,name,avail,costUni,priceUniSug,groupx,category,stockmin,stockmax FROM Inventory WHERE code = ? and groupx = ?',(code,groupx1)) data = cursor.fetchone() decrease_stock(cursor,code,groupx1,quantity) auto_del_0(connection,cursor) cursor.execute('SELECT name FROM Inventory WHERE code = ? AND groupx = ?' , (code,groupx2)) data2 = cursor.fetchone() if (data2 == None): c = (data[0],data[1],quantity,data[3],data[4],groupx2,data[6],data[7],data[8]) cursor.execute('INSERT INTO Inventory (code,name,avail,costUni,priceUniSug,groupx,category,stockmin,stockmax) VALUES(?,?,?,?,?,?,?,?,?)',c) else: increase_stock(cursor,code,groupx2,quantity) connection.commit() def shopping_cart(connection,cursor,lista): """ This function does multiple sales.lista is a list of lists. The elements should contain the following arguments. : [code,quantity,tax,pricef,discount,payment,client,groupx] """ counter = 0 results =[] failed = {} for e in lista: a = sale_valid2(cursor,e[0],e[1],e[7]) results.append(a) for el in range(len(results)): if (results[el] != 0): failed.setdefault((el+1),results[el]) if (len(failed) > 0): print(failed) return failed t = ordinal_generator2(connection,cursor) for e in lista: counter += 1 transa = t + (str(counter).zfill(3)) add_item_exit_fixed(connection,cursor,e[0],e[1],e[2],e[3],e[4],e[5],e[6],transa,e[7]) return True def sale_valid(cursor,code,client_name,quantity,groupx): """ Checks If client ,quantity, or code exists. 0 = Sucessful 1 = does not exists. 
2 = reduces below existing units , 3 = client does not exist """ l = [] a = (code,groupx) b = (client_name,) cursor.execute('SELECT code,avail FROM Inventory WHERE code = ? AND groupx = ?',a) data0 = cursor.fetchone() if (data0 == None): l.append(1) if (data0 != None): if (data0[1] < quantity): l.append(2) cursor.execute('SELECT name FROM Clients WHERE name = ?',b) data2 = cursor.fetchone() if (data2 == None): l.append(3) if (len(l) == 0): l = 0 return l def sale_valid2(cursor,code,quantity,groupx): """ Checks If client ,quantity, or code exists. 0 = Sucessful 1 = does not exists. 2 = reduces below existing units , """ l = [] a = (code,groupx) cursor.execute('SELECT code,avail FROM Inventory WHERE code = ? AND groupx = ?',a) data0 = cursor.fetchone() if (data0 == None): l.append(1) if (data0 != None): if (data0[1] < quantity): l.append(2) if (len(l) == 0): l = 0 return l def query_add(cursor,code,groupx): cursor.execute('SELECT name,costUni,priceUniSug,category,stockmin,stockmax FROM Inventory WHERE code = ? AND groupx = ?',(code,groupx)) data = cursor.fetchone() if (data == None): return False return data def query_sale(cursor,code,groupx): """ Returns list with [name,priceUniSug,costUni] """ cursor.execute('SELECT name,priceUniSug,costUni FROM Inventory WHERE code = ? AND groupx = ?',(code,groupx)) data = cursor.fetchone() if (data == None): print('No name with that code') return False return data def query_modify(cursor,code,groupx): """ Returns [avail,priceUniSug,costUni,category,stockmin,stockmax,name] """ cursor.execute('SELECT avail,priceUniSug,costUni,category,stockmin,stockmax, name FROM Inventory WHERE code = ? 
AND groupx = ?',(code,groupx)) data = cursor.fetchone() return data def query_client(cursor,name): """ Returns [identification,mail,num,cel,fax,direction,bought,money_invested,paid,debt] """ cursor.execute('SELECT identification,mail,num,cel,fax,direction FROM Clients WHERE name = ?',(name,)) data = cursor.fetchone() return data #------------------------------------------------------------------------------------------------------- def add_client(connection,cursor,identification,name,mail,num,cel,fax,direction): """ Adds client to client table. Returns False if the name has been used before. """ bought = 0 money_invested = 0.0 paid = 0.0 debt = 0.0 i = (name,) cursor.execute('SELECT name FROM Clients WHERE name = ?',i) data = cursor.fetchone() if (data != None): print('Name already used.') return False t = (identification,name,mail,num,cel,fax,direction,bought,money_invested,paid,debt) cursor.execute("INSERT INTO Clients (identification,name,mail,num,cel,fax,direction,bought,money_invested,paid,debt) VALUES (?,?,?,?,?,?,?,?,?,?,?)",t) connection.commit() return True def update_client_info(connection,cursor,user): a = (user,) money = [] articles = [] cursor.execute('SELECT priceItems,quantity FROM Outs WHERE client = ? ',a) data2 = cursor.fetchall() if (data2 == None): return False for row2 in data2: money.append(row2[0]) for row2 in data2: articles.append(row2[1]) debit = [] credit = [] cursor.execute("SELECT priceItems FROM Outs WHERE client = ? AND payment = 'DEB'",a) data4 = cursor.fetchall() for row4 in data4: debit.append(row4[0]) cursor.execute("SELECT priceItems FROM Outs WHERE client = ? AND payment = 'CRE'",a) data5 = cursor.fetchall() for row5 in data5: credit.append(row5[0]) money = sum(money) articles = sum(articles) debit = sum(debit) credit =sum(credit) cursor.execute('UPDATE Clients SET bought = ?,money_invested = ?,paid = ?,debt = ? 
WHERE name = ?',(articles,money,debit,credit,user)) connection.commit() def del_client_id(connection,cursor,identification): cursor.execute('DELETE FROM Clients WHERE identification = ?',(identification,)) connection.commit() return True def del_client_name(connection,cursor,name): cursor.execute('DELETE FROM Clients WHERE name = ?',(name,)) connection.commit() return True #------------------------------------------------------------------------------------------------------- def calc_bal_his(cursor): """ CalcuLates balances of all exits and entries ever and adds them to the historic balance db. """ t = [] cursor.execute('SELECT costItems FROM Entries') data = cursor.fetchall() for row0 in data: t.append(row0[0]) costTot = sum(t) cursor.execute('SELECT priceItems,revenue,tax,winnings FROM Outs') query = cursor.fetchall() #------------------------------------------------------------------------------------------------------- p = [] for row2 in query: p.append(row2[0]) priceTot = sum(p) #------------------------------------------------------------------------------------------------------- g = [] for row3 in query: g.append(row3[1]) revenueTot = sum(g) #------------------------------------------------------------------------------------------------------- i = [] for row4 in query: i.append(row4[2]) taxTot = sum(i) #------------------------------------------------------------------------------------------------------- x = [] for row5 in query: x.append(row5[3]) winningsTot = sum(x) #------------------------------------------------------------------------------------------------------- cd = calc_deb(cursor) cc = calc_cre(cursor) return [costTot,priceTot,cd,cc,round((priceTot - costTot),2),taxTot,round((priceTot - costTot - taxTot),2)] def calc_bal_mes(cursor,year,month): if (len(year) != 4) or (int(year) < 2016) or (int(year)> 3000) or (isinstance(year,float)) or (len(month) != 2) or (isinstance(month,float)) or (int(month)< 0) or (int(month)>12) : print('Bad date') 
return False date = year+'-'+ month entries = [] #------------------------------------------------------------------------------------------------------- cursor.execute('SELECT dat,costItems FROM Entries') data = cursor.fetchall() for row in data: if (date in row[0]): entries.append(row[1]) costTot = sum(entries) cursor.execute('SELECT dat,priceItems,revenue,tax,winnings FROM Outs ') query = cursor.fetchall() #------------------------------------------------------------------------------------------------------- p = [] for e in query: if (date in e[0]): p.append(e[1]) priceTot = sum(p) #------------------------------------------------------------------------------------------------------- g = [] for d in query: if (date in d[0]): g.append(d[2]) revenueTot = sum(g) #------------------------------------------------------------------------------------------------------- i = [] for elem in query: if (date in elem[0]): i.append(elem[3]) taxTot = sum(i) #------------------------------------------------------------------------------------------------------- x = [] for al in query: if(date in al[0]): x.append(al[4]) winningsTot = sum(x) #------------------------------------------------------------------------------------------------------- cd = calc_deb(cursor,date) cc = calc_cre(cursor,date) return [costTot,priceTot,cd,cc,round((priceTot - costTot),2),taxTot,round((priceTot - costTot - taxTot),2)] def calc_bal_year(cursor,year): if (len(year) != 4) or (int(year) < 2016) or (int(year)> 3000) or (isinstance(year,float)) : print('Not proper date.') return False date = year entries = [] #------------------------------------------------------------------------------------------------------- cursor.execute('SELECT dat,costItems FROM Entries') data = cursor.fetchall() for row in data: if (date in row[0]): entries.append(row[1]) costTot = sum(entries) cursor.execute('SELECT dat,priceItems,revenue,tax,winnings FROM Outs ') query = cursor.fetchall() 
#------------------------------------------------------------------------------------------------------- p = [] for e in query: if (date in e[0]): p.append(e[1]) priceTot = sum(p) #------------------------------------------------------------------------------------------------------- g = [] for d in query: if (date in d[0]): g.append(d[2]) revenueTot = sum(g) #------------------------------------------------------------------------------------------------------- i = [] for elem in query: if (date in elem[0]): i.append(elem[3]) taxTot = sum(i) #------------------------------------------------------------------------------------------------------- x = [] for al in query: if(date in al[0]): x.append(al[4]) winningsTot = sum(x) #------------------------------------------------------------------------------------------------------- cd = calc_deb(cursor,date) cc = calc_cre(cursor,date) return [costTot,priceTot,cd,cc,round((priceTot - costTot),2),taxTot,round((priceTot - costTot - taxTot),2)] def calc_bal_day(cursor,year,month,day): if (len(year) != 4) or (int(year) < 2016) or (int(year)> 3000) or (isinstance(year,float)) or (len(month) != 2) or (isinstance(month,float)) or (int(month)< 0) or (int(month)>12) or (int(day) > 31) or (len(day) != 2): print('Bad date') return False date = year+'-'+ month + '-' + day entries = [] cursor.execute('SELECT dat,costItems FROM Entries') data = cursor.fetchall() for row in data: if (date in row[0]): entries.append(row[1]) costTot = sum(entries) cursor.execute('SELECT dat,priceItems,revenue,tax,winnings FROM Outs ') query = cursor.fetchall() #------------------------------------------------------------------------------------------------------- p = [] for e in query: if (date in e[0]): p.append(e[1]) priceTot = sum(p) #------------------------------------------------------------------------------------------------------- g = [] for d in query: if (date in d[0]): g.append(d[2]) revenueTot = sum(g) 
#------------------------------------------------------------------------------------------------------- i = [] for elem in query: if (date in elem[0]): i.append(elem[3]) taxTot = sum(i) #------------------------------------------------------------------------------------------------------- x = [] for al in query: if(date in al[0]): x.append(al[4]) winningsTot = sum(x) #------------------------------------------------------------------------------------------------------- cd = calc_deb(cursor,date) cc = calc_cre(cursor,date) return [costTot,priceTot,cd,cc,round((priceTot - costTot),2),taxTot,round((priceTot - costTot - taxTot),2)] #------------------------------------------------------------------------------------------------------- def gen_query(cursor,table,column,stri,num): """ Returns a list with elements that contain the string. Returns empty list if it does find one. """ list1 = [] list2 = [] query = 'SELECT '+ str(column) +' FROM '+ str(table) cursor.execute(query) data = cursor.fetchall() if (data == None): return list1 for row in data: list1.append(row[0]) for e in list1: if (stri in e ): list2.append(e) while (len(list2) > num): list2.pop() print(list2) return list2 def paid(connection,cursor,trans): """ Marks an item as paid. """ t = (trans,) cursor.execute("UPDATE Outs SET payment = 'DEB' WHERE trans = ?",(trans,)) cursor.execute("SELECT client FROM Outs WHERE trans = ?",(trans,)) data = cursor.fetchone() update_client_info(connection,cursor,data[0]) connection.commit() def move_to_credit(connection,cursor,trans): """ Marks an item as not paid. """ cursor.execute("UPDATE Outs SET payment = 'CRE' WHERE trans = ?",(trans,)) cursor.execute("SELECT client FROM Outs WHERE trans = ?",(trans,)) data = cursor.fetchone() update_client_info(connection,cursor,data[0]) connection.commit() def calc_deb(cursor, date = None): """ Calculates liquidity. 
""" deb = [] if (date == None): cursor.execute("SELECT priceItems FROM Outs WHERE payment = 'DEB'") data = cursor.fetchall() for e in data: deb.append(e[0]) else: cursor.execute("SELECT priceItems,dat FROM Outs WHERE payment = 'DEB'") data = cursor.fetchall() for e in data: if (date in e[1]): deb.append(e[0]) deb = round(sum(deb),2) return deb def calc_cre(cursor,date = None): """ Calculates money customers currently owe. """ cre = [] if (date == None): cursor.execute("SELECT priceItems FROM Outs WHERE payment = 'CRE'") data = cursor.fetchall() for e in data: cre.append(e[0]) else: cursor.execute("SELECT priceItems,dat FROM Outs WHERE payment = 'CRE'") data = cursor.fetchall() for e in data: if (date in e[1]): cre.append(e[0]) cre = round(sum(cre),2) return cre #------------------------------------------------------------------------------------------------------- def del_general(connection,cursor,trans): """ Generalizes use of delete function. Clients table delete not included. """ try: if(trans[0] == '1'): return del_item_entries(connection,cursor,trans) elif(trans[0] == '2'): return del_item_salidas(connection,cursor,trans) else: print('Unknown transaction number') return False except TypeError: print('Error in cell') return False def del_item_entries(connection,cursor,trans): """ Deletes items from entries by transaction number. """ cursor.execute('DELETE FROM Entries WHERE trans = ?',(trans,)) connection.commit() return True def del_item_inventory(connection,cursor,code,groupx): """ Deletes items from inventory by code. """ cursor.execute('DELETE FROM Inventory WHERE code = ? AND groupx = ?',(code,groupx)) connection.commit() return True def del_item_salidas(connection,cursor,trans): """ Deletes items by transaction number. 
""" cursor.execute('SELECT quantity FROM Outs WHERE trans = ?',(trans,)) data = cursor.fetchone() if (data == None): print('Transaction number not from an Out') return False cursor.execute('SELECT priceItems,client FROM Outs WHERE trans = ?',(trans,)) p = cursor.fetchone() cursor.execute('SELECT money_invested FROM Clients WHERE name = ? ',(p[1],)) d = cursor.fetchone() f = d[0]- p[0] cursor.execute('UPDATE Clients SET money_invested = ? WHERE name = ?',(f,p[1])) cursor.execute('SELECT code,groupx FROM Outs WHERE trans = ?',(trans,)) data2 = cursor.fetchone() #------------------------------------------------------------------------------------------------------- g = (data2[0],data2[1]) cursor.execute('SELECT avail FROM Inventory WHERE code = ? AND groupx = ?',g) data3 = cursor.fetchone() avail = data3[0] + data[0] b =(avail,data2[0],data2[1]) cursor.execute('UPDATE Inventory SET avail = ? WHERE code = ? AND groupx = ?',b) #------------------------------------------------------------------------------------------------------- cursor.execute('DELETE FROM Outs WHERE trans = ?',(trans,)) connection.commit() return True def auto_del_0(connection,cursor): cursor.execute('SELECT avail FROM Inventory WHERE avail = 0') data4 = cursor.fetchone() if data4 != None: cursor.execute('DELETE FROM Inventory WHERE avail = 0') def unique(cursor,column,table,key_column = "",key = ""): if key_column == "": cursor.execute("SELECT DISTINCT "+ column + " FROM " + table) else: cursor.execute("SELECT DISTINCT " + column + " FROM " + table + " WHERE " + key_column + " = ?",(key,)) unique_values = [] data = cursor.fetchall() if data != None: for line in data: unique_values.append(line[0]) return unique_values #------------------------------------------------------------------------------------------------------- def ordinal_generator(connection,cursor): """ Generates string numbers starting with 1 and makes sure to never have used them before.It also adds them complementary 0's until it has a 
minimum length of 8 characters. """ exists = False trans = "" cursor.execute('SELECT MAX(ID) FROM OrdinalNumber') index = cursor.fetchone() if (index[0] == None): trans = '00000000' else: index = str(index[0]) trans = index.zfill(8) d = ('a',) cursor.execute('INSERT INTO OrdinalNumber(num) VALUES (?)',d) connection.commit() return ('1' + trans) def ordinal_generator2(connection,cursor): exists = False trans = "" cursor.execute('SELECT MAX(ID) FROM OrdinalNumberS') index = cursor.fetchone() if (index[0] == None): trans = '000000' else: index = str(index[0]) trans = index.zfill(6) d = ('a',) cursor.execute('INSERT INTO OrdinalNumberS(num) VALUES (?)',d) connection.commit() return ('2' + trans) def update_all(cursor,code,groupx,cost,price,name,category): t = (name,price,cost,category,code,groupx) cursor.execute('UPDATE Inventory SET name = ?,priceUniSug = ?,costUni = ?,category = ? WHERE code = ? AND groupx = ?',t) def increase_stock(cursor,code,groupx,quantity): cursor.execute('SELECT avail FROM Inventory WHERE code = ? AND groupx = ?',(code,groupx)) data = cursor.fetchone() avail = int(data[0]) + quantity cursor.execute('UPDATE Inventory SET avail = ? WHERE code = ? AND groupx = ?',(avail,code,groupx)) return True def decrease_stock(cursor,code,groupx,quant): #Reduce stock cursor.execute('SELECT avail FROM Inventory WHERE code = ? AND groupx = ?',(code,groupx)) data = cursor.fetchone() avail = int(data[0]) - quant cursor.execute('UPDATE Inventory SET avail = ? WHERE code = ? AND groupx = ?',(avail,code,groupx)) return True def print_(cursor,table):#Print any table. cursor.execute('SELECT * FROM '+ table) data = cursor.fetchall() for row in data: print(row) return True
/mec_login.py
""" Author:Christopher Holder Project : Version 1.0(Login) """ import sqlite3 import sys def create_login_table(cursor,connection):#Creates login table. cursor.execute("CREATE TABLE IF NOT EXISTS login(ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, User TEXT NOT NULL, Pass TEXT NOT NULL,class TEXT NOT NULL,dat TEXT);") cursor.execute("SELECT User FROM login WHERE User = 'Administrator'") data = cursor.fetchone() if data == None: print("...............Adding admin account") cursor.execute("INSERT INTO login (User, Pass,class,dat)""VALUES ('Administrator','nimda','admin',date('now'))") print("...............Account added") connection.commit() return True def check_login(cursor,username,password):# Logs in ,returns current user. a = (username,password,) cursor.execute("SELECT User,Pass FROM login WHERE User = ? AND Pass = ?",a) data = cursor.fetchone()#Returns a single tuple. if data == None:#f returns type None. print("Not registered") return False return True def add_user(cursor,username,password): a = (username,) b = (username,password,) cursor.execute("SELECT User FROM login WHERE User =?", a) data = cursor.fetchone() if data == None: print("Username not valid") if len(password) < 8: print("Must be at least 8 characters.") return False cursor.execute("INSERT INTO login (User, Pass,class,dat) VALUES (?,?,'regular',date('now'))", b) print("Succesful registration.") return True else: print("Already registered") return False def print_login_table(cursor): elems = cursor.execute("SELECT * FROM login") data = cursor.fetchall() for row in data: print(row) def check_if_admin(cursor,username): a =(username,) cursor.execute("SELECT class FROM login WHERE User = ?",a) data = cursor.fetchone() if data == None: return False elif data[0] == 'admin': return True else: return False def remove_user(): pass def log_out(cursor,connection): print('') print('.................Closing') connection.commit() cursor.close() sys.exit()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
gouravsaini021/maruti
refs/heads/master
{"/maruti/deepfake/dataset.py": ["/maruti/utils.py", "/maruti/sizes.py", "/maruti/vision/video.py"], "/maruti/deepfake/models.py": ["/maruti/torch/utils.py"], "/maruti/__init__.py": ["/maruti/torch/utils.py", "/maruti/deepfake/dataset.py"], "/maruti/vision/__init__.py": ["/maruti/vision/image.py"], "/maruti/deepfake/__init__.py": ["/maruti/deepfake/dataset.py"]}
└── ├── maruti │ ├── __init__.py │ ├── deepfake │ │ ├── __init__.py │ │ ├── dataset.py │ │ ├── models.py │ │ └── utils.py │ ├── imports │ │ ├── __init__.py │ │ ├── general.py │ │ └── ml.py │ ├── kaggle.py │ ├── sizes.py │ ├── torch │ │ ├── __init__.py │ │ ├── callback.py │ │ └── utils.py │ ├── utils.py │ └── vision │ ├── __init__.py │ ├── image.py │ └── video.py ├── setup.py └── tests ├── test_sizes.py ├── test_utils.py ├── torch │ └── test_utils.py └── vision └── test_image.py
/maruti/__init__.py
from . import utils from . import sizes from . import vision from . import deepfake from . import kaggle from . import torch from .utils import * from .sizes import * from .deepfake import ImageReader from .torch import Learner
/maruti/deepfake/__init__.py
from . import dataset from . import models from .dataset import VideoDataset, DeepfakeDataset, ImageReader from .dataset import transform, group_transform
/maruti/deepfake/dataset.py
"""Deepfake training data plumbing: face-crop readers, the DFDC part
downloader, and torch Dataset wrappers pairing real/fake videos."""
import pathlib
from warnings import warn
import subprocess
import maruti
import os
from os.path import join
from PIL import Image
import torch
import shlex
import time
from collections import defaultdict
from ..vision.video import get_frames_from_path, get_frames
import random
from ..utils import unzip, read_json
from ..sizes import file_size
import numpy as np
import cv2
from tqdm.auto import tqdm
from torchvision import transforms as torch_transforms
from torch.utils.data import Dataset
from ..torch.utils import def_norm as normalize

# Package-relative data directory (cookies etc. live under data/kaggle).
DATA_PATH = join(os.path.dirname(__file__), 'data/')

__all__ = ['split_videos', 'VideoDataset', 'transform', 'group_transform']

# Per-split image pipelines: training adds augmentation, validation only
# tensorizes + ImageNet-normalizes (def_norm from ..torch.utils).
transform = {
    'train': torch_transforms.Compose(
        [
            torch_transforms.ToPILImage(),
            torch_transforms.ColorJitter(0.3, 0.3, 0.3, 0.1),
            torch_transforms.RandomHorizontalFlip(),
            torch_transforms.RandomResizedCrop((224, 224), scale=(0.65, 1.0)),
            torch_transforms.ToTensor(),
            normalize,
        ]
    ),
    'val': torch_transforms.Compose([
        torch_transforms.ToTensor(),
        normalize,
    ]
    )
}

# Apply the per-split transform to every image in a group and stack them.
group_transform = {
    'train': lambda x: torch.stack(list(map(transform['train'], x))),
    'val': lambda x: torch.stack(list(map(transform['val], x))) if False else torch.stack(list(map(transform['val'], x)))
}


class ImageReader:
    """Index pre-extracted face crops (named ``<vid>_<frame>_<person>.jpg``)
    spread across part folders, and serve them as PIL images.

    metadata: per-video dict; keys used here: 'label', 'original', 'pc'
    (person count), 'fc' (frame count), optional 'error'.
    """

    def __init__(self, path, metadata, is_path_cache=False, vb=True, ignore_frame_errors=False):
        self.vid2part = {}
        self.meta = metadata
        self.ignore_frame_errors = ignore_frame_errors
        if not is_path_cache:
            # Walk every part folder once and map video name -> folder.
            parts = os.listdir(path)
            assert len(parts) > 0, 'no files found'
            start = time.perf_counter()
            for part in parts:
                path_to_part = os.path.join(path, part)
                imgs = os.listdir(path_to_part)
                for img in imgs:
                    self.vid2part[self.vid_name(img)] = path_to_part
            end = time.perf_counter()
            if vb:
                print('Total time taken:', (end - start) / 60, 'mins')
        else:
            # *path* is a JSON cache of the vid->part mapping.
            self.vid2part = maruti.read_json(path)

    def is_real(self, vid):
        return self.meta[vid]['label'] == 'REAL'

    def is_fake(self, vid):
        return not self.is_real(vid)

    def is_error(self, vid):
        return 'error' in self.meta[vid]

    def vid_name(self, img_name):
        # 'abc_12_0.jpg' -> 'abc.mp4'
        name = img_name.split('_')[0]
        return name + '.mp4'

    def create_name(self, vid, frame, person):
        return f'{vid[:-4]}_{frame}_{person}.jpg'

    def total_persons(self, vid):
        # Fakes inherit the person count of their original.
        if self.is_real(vid):
            return self.meta[vid]['pc']
        orig_vid = self.meta[vid]['original']
        return self.meta[orig_vid]['pc']

    def random_person(self, vid, frame):
        person = random.choice(range(self.total_persons(vid)))
        return self.get_image(vid, frame, person)

    def random_img(self, vid):
        frame = random.choice(range(self.total_frames(vid)))
        person = random.choice(range(self.total_persons(vid)))
        return self.get_image(vid, frame, person)

    def sample(self):
        """Random image from a random non-error video."""
        vid = random.choice(list(self.vid2part))
        while self.is_error(vid):
            vid = random.choice(list(self.vid2part))
        frame = random.choice(range(self.total_frames(vid)))
        person = random.choice(range(self.total_persons(vid)))
        return self.get_image(vid, frame, person)

    def total_frames(self, vid):
        # NOTE(review): off-by-one on purpose? returns fc - 1 -- confirm.
        return self.meta[vid]['fc'] - 1

    def create_absolute(self, name):
        path = os.path.join(self.vid2part[self.vid_name(name)], name)
        return path

    def get_image(self, vid, frame, person):
        """Open the crop for (vid, frame, person); raises when out of range
        unless ignore_frame_errors clamps the frame index."""
        if self.total_persons(vid) <= person:
            raise Exception('Not Enough Persons')
        if self.total_frames(vid) <= frame:
            if self.ignore_frame_errors:
                frame = self.total_frames(vid) - 1
            else:
                raise Exception('Not Enough Frames')
        img = self.create_name(vid, frame, person)
        path = self.create_absolute(img)
        return Image.open(path)


def split_videos(meta_file):
    '''
    Groups real-fake videos in dictionary
    '''
    # original video -> set of its fakes
    split = defaultdict(lambda: set())
    for vid in meta_file:
        if meta_file[vid]['label'] == 'FAKE':
            split[meta_file[vid]['original']].add(vid)
    return split


class VideoDataset:
    '''
    create dataset from videos and metadata.
    @params: To download and create use VideoDataset.from_part method
    '''

    def __init__(self, path, metadata_path=None):
        self.path = pathlib.Path(path)
        self.video_paths = list(self.path.glob('*.mp4'))
        metadata_path = metadata_path if metadata_path else self.path / 'metadata.json'
        try:
            self.metadata = read_json(metadata_path)
        except FileNotFoundError:
            # Metadata is optional; group info simply won't be available.
            del metadata_path
            print('metadata file not found.\n Some functionalities may not work.')
        if hasattr(self, 'metadata'):
            self.video_groups = split_videos(self.metadata)

    @staticmethod
    def download_part(part='00', download_path='.', cookies_path=join(DATA_PATH, 'kaggle', 'cookies.txt')):
        """Download one DFDC zip via wget, showing a size-based progress bar.

        Returns the path of the downloaded zip.
        """
        dataset_path = f'https://www.kaggle.com/c/16880/datadownload/dfdc_train_part_{part}.zip'
        # folder = f'dfdc_train_part_{int(part)}'
        command = f'wget -c --load-cookies {cookies_path} {dataset_path} -P {download_path}'
        command_args = shlex.split(command)
        fp = open(os.devnull, 'w')
        download = subprocess.Popen(command_args, stdout=fp, stderr=fp)
        bar = tqdm(total=10240, desc='Downloading ')
        zip_size = 0
        while download.poll() is None:
            # Poll the growing file to advance the bar (MB granularity).
            time.sleep(0.1)
            try:
                new_size = int(
                    file_size(download_path + f'/dfdc_train_part_{part}.zip'))
                bar.update(new_size - zip_size)
                zip_size = new_size
            except FileNotFoundError:
                continue
        if download.poll() != 0:
            print('some error')
            print('download', download.poll())
        download.terminate()
        fp.close()
        bar.close()
        return download_path + f'/dfdc_train_part_{part}.zip'

    @classmethod
    def from_part(cls, part='00', cookies_path=join(DATA_PATH, 'kaggle', 'cookies.txt'), download_path='.'):
        """Download+unzip a part (if not already present) and wrap it."""
        folder = f'dfdc_train_part_{int(part)}'
        if os.path.exists(pathlib.Path(download_path) / folder):
            return cls(pathlib.Path(download_path) / folder)
        downloaded_zip = cls.download_part(
            part=part, download_path=download_path, cookies_path=cookies_path)
        unzip(downloaded_zip, path=download_path)
        os.remove(download_path + f'/dfdc_train_part_{part}.zip')
        path = pathlib.Path(download_path) / folder
        return cls(path)

    def __len__(self):
        return len(self.video_paths)

    def n_groups(self, n, k=-1):
        '''
        returns random n real-fake pairs by default.
        else starting from k.
        '''
        # NOTE(review): self.video_groups is a defaultdict, which supports
        # neither slicing nor random.choices directly, and `choices` below
        # is not defined/imported anywhere (NameError). This method looks
        # broken as written -- confirm intended behavior before fixing.
        if k != -1:
            if n + k >= len(self.video_groups):
                warn(RuntimeWarning(
                    'n+k is greater then video length. Returning available'))
                n = len(self.video_groups) - k - 1
            return self.video_groups[k:n + k]
        if n >= len(self.video_groups):
            warn(RuntimeWarning('n is greater then total groups. Returning available'))
            n = len(self.video_groups) - 1
        return choices(self.video_groups, k=n)


class VidFromPathLoader:
    """ Loader to use with DeepfakeDataset class"""

    def __init__(self, paths, img_reader=None):
        """paths as {'00':/part/00,'01'..}"""
        self.path = paths
        # Fall back to the static default reader when none is supplied.
        self.img_reader = self.img_reader if img_reader is None else img_reader

    @staticmethod
    def img_reader(path, split='val', max_limit=40):
        # val: deterministic frame 0; train: random early frame.
        frame_no = 0 if split == 'val' else random.randint(0, max_limit)
        frame = list(get_frames_from_path(
            path, [frame_no]))[0]
        return frame

    @staticmethod
    def img_group_reader(path, split='val', mode='distributed', num_frames=4, mode_info=[None]):
        """use with partial to set mode
        mode info:
        distributed -> No Use
        forward -> {jumps, index:0, readjust_jumps: True}
        backward -> {jumps, index:-1, readjust_jumps: True} -1 refers to end

        NOTE(review): the default mode_info=[None] is a mutable default AND a
        list, so the .get(...) calls below fail for 'forward'/'backward';
        the 'backward' branch also reads `start`, which is never assigned
        there (NameError). Only 'distributed' works as written -- confirm.
        """
        cap = cv2.VideoCapture(path)
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if mode == 'distributed':
            frames = np.linspace(0, frame_count - 1, num_frames, dtype=int)
        elif mode == 'forward':
            start = mode_info.get('index', 0)
            adjust = mode_info.get('readjust_jumps', True)
            jumps = mode_info['jumps']
            if adjust:
                frames = np.linspace(start, min(
                    frame_count - 1, start + (num_frames - 1) * jumps), num_frames, dtype=int)
            else:
                frames = np.linspace(
                    start, start + (num_frames - 1) * jumps, num_frames, dtype=int)
        elif mode == 'backward':
            end = mode_info.get('index', frame_count)
            adjust = mode_info.get('readjust_jumps', True)
            jumps = mode_info['jumps']
            if adjust:
                frames = np.linspace(
                    max(0, start - (num_frames - 1) * jumps), end, num_frames, dtype=int)
            else:
                frames = np.linspace(
                    start + (num_frames - 1) * jumps, num_frames, end, dtype=int)
        return get_frames(cap, frames, 'rgb')

    def __call__(self, metadata, video, split='val'):
        # Resolve the video's part folder from metadata, then read a frame.
        vid_meta = metadata[video]
        video_path = join(self.path[vid_meta['part']], video)
        return self.img_reader(video_path, split)


class DeepfakeDataset(Dataset):
    """Methods
    'f12' r1-f1, r1-f2..,(default)
    'f..' r1-f1/f2/f3..
    'f1' r1-f1,
    'ff' r f1 f2 f3..
    Metadata
    'split'(train-val),'label'(FAKE-REAL),'fakes'([video,video])
    loader func(metadata,video,split)->input
    error_handler func(self, index, error)->(input, label)"""
    # Class-level counter: 'f12' cycles through a real video's fakes across
    # passes. NOTE(review): shared across ALL instances -- confirm intended.
    iteration = 0

    def __init__(self, metadata, loader, transform=None, split='train', method='f12', error_handler=None):
        self.transform = transform
        self.split = split
        self.loader = loader
        self.method = method
        self.error_handler = error_handler
        self.metadata = metadata
        self.dataset = []
        real_videos = filter(
            lambda x: metadata[x]['split'] == split, list(split_videos(metadata)))
        for real_video in real_videos:
            fake_videos = list(metadata[real_video]['fakes'])
            self.dataset.append(real_video)
            if method == 'f12':
                self.dataset.append(
                    fake_videos[self.iteration % len(fake_videos)])
            elif method == 'f..':
                self.dataset.append(random.choice(fake_videos))
            elif method == 'f1':
                self.dataset.append(fake_videos[0])
            elif method == 'ff':
                for fake_video in fake_videos:
                    self.dataset.append(fake_video)
            else:
                raise ValueError(
                    'Not a valid method. Choose from f12, f.., f1, ff')

    def __getitem__(self, i):
        # Bump the fake-rotation counter once per pass (index 0 == new epoch).
        if i == 0:
            self.iteration += 1
        try:
            img = self.loader(self.metadata, self.dataset[i], split=self.split)
            label = torch.tensor(
                [float(self.metadata[self.dataset[i]]['label'] == 'FAKE')])
            if self.transform is not None:
                img = self.transform(img)
            return img, label
        except Exception as e:
            # Lazily install a default handler: log and retry a random item.
            if self.error_handler is None:
                def default_error_handler(obj, x, e):
                    print(f'on video {self.dataset[x]} error: {e}')
                    return self[random.randint(1, len(self) - 1)]
                self.error_handler = default_error_handler
            return self.error_handler(self, i, e)

    def __len__(self):
        return len(self.dataset)
/maruti/deepfake/models.py
"""Deepfake classifier models: a ResNeXt50 feature extractor feeding an
LSTM over frame sequences, plus a multi-set wrapper."""
import itertools

import torch  # FIX: torch.cat / torch.tensor were used but torch was never imported
import torch.nn as nn
import torchvision
from torch.nn.utils.rnn import PackedSequence, pack_sequence

from ..torch.utils import freeze
from .dataset import group_transform


def resnext50(feature=False, pretrained=False):
    """ResNeXt50-32x4d; as a 2048-d feature extractor when *feature*,
    otherwise with a single-logit head."""
    model = torchvision.models.resnext50_32x4d(pretrained)
    if feature:
        model.fc = nn.Identity()
    else:
        model.fc = nn.Linear(2048, 1)
    return model


def binaryClassifier(input_features):
    """MLP head mapping *input_features* to a flat single logit."""
    return nn.Sequential(
        nn.Linear(input_features, input_features // 2),
        nn.ReLU(),
        nn.BatchNorm1d(input_features // 2),
        nn.Linear(input_features // 2, input_features // 2),
        nn.ReLU(),
        nn.BatchNorm1d(input_features // 2),
        nn.Linear(input_features // 2, 128),
        nn.ReLU(),
        nn.Dropout(),
        nn.Linear(128, 1),
        nn.Flatten())


class ResLSTM(nn.Module):
    """Per-frame ResNeXt features -> LSTM -> binary classifier.

    Consumes a PackedSequence of image tensors; returns one logit per
    sequence, reordered back to the caller's original batch order.
    """

    def __init__(self, pretrained=False, hidden_size=512, num_layers=1, bidirectional=True, dropout=0.5):
        super().__init__()
        # resnext feature extractor (fc replaced by Identity)
        self.feature_model = resnext50(True, pretrained)
        # lstm over the per-frame 2048-d features
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(2048, hidden_size=hidden_size,
                            num_layers=num_layers,
                            bidirectional=bidirectional, dropout=dropout)
        classifier_features = hidden_size * num_layers
        if bidirectional:
            classifier_features *= 2
        self.classifier = binaryClassifier(classifier_features)

    def forward(self, x):
        # remember the original batch order of the PackedSequence
        unsorted_indices = x.unsorted_indices
        # prediction on all images from each batch in one flat pass
        x_data = self.feature_model(x.data)
        # converting again to PackedSequence
        x = PackedSequence(x_data, x.batch_sizes)
        # lstm
        out, (h, c) = self.lstm(x)
        batch_size = h.shape[1]
        # treat each batch element separately instead of lstm layer
        split_on_batch = h.permute(1, 0, 2)
        # reshape to make each batch flat
        combining_passes = split_on_batch.reshape(batch_size, -1)
        # classify
        val = self.classifier(combining_passes).squeeze(1)
        return val[unsorted_indices]

    def param(self, i=-1):
        """Parameter groups for discriminative learning rates.

        i == -1 -> everything; 0..3 -> progressively deeper groups.
        """
        # all
        if i == -1:
            return self.parameters()
        # grouped
        if i == 0:
            return itertools.chain(self.feature_model.conv1.parameters(),
                                   self.feature_model.bn1.parameters(),
                                   self.feature_model.layer1.parameters(),)
        if i == 1:
            return itertools.chain(self.feature_model.layer2.parameters(),
                                   self.feature_model.layer3.parameters())
        if i == 2:
            return itertools.chain(self.feature_model.layer4.parameters(),
                                   self.feature_model.fc.parameters())
        if i == 3:
            return itertools.chain(self.lstm.parameters(),
                                   self.classifier.parameters())
        else:
            print('there are only 4 param groups -> 0,1,2,3')


class ReslstmNN(nn.Module):
    """Runs ResLSTM over *num_sets* frame sets per video and classifies
    the concatenated 128-d set embeddings."""

    def __init__(self, num_sets=6, pretrained=False, hidden_size=512, num_layers=1, bidirectional=True, dropout=0.5):
        super().__init__()
        self.feature = ResLSTM(pretrained=pretrained, hidden_size=hidden_size,
                               num_layers=num_layers,
                               bidirectional=bidirectional, dropout=dropout)
        # Chop the final Linear(128,1)+Flatten off the inner classifier so
        # ResLSTM emits 128-d embeddings instead of logits.
        self.feature.classifier[9] = nn.Identity()
        self.feature.classifier[10] = nn.Identity()
        self.classifier = binaryClassifier(128 * num_sets)

    def forward(self, x):  # FIX: `self` was missing from the signature
        preds = []
        for vid_set in x:
            preds.append(self.feature(vid_set))
        preds = torch.cat(preds, dim=1)
        preds = self.classifier(preds)
        return preds.squeeze(dim=1)

    @staticmethod
    def transform(vid_sets):
        # NOTE(review): group_transform is a dict of callables
        # ({'train':..., 'val':...}); calling it directly raises TypeError.
        # Probably group_transform['val'](vid) was intended -- confirm.
        transformed = []
        for vid in vid_sets:
            transformed.append(group_transform(vid))
        return transformed

    @staticmethod
    def collate(batches):
        """Collate [(sets, target), ...] into per-set PackedSequences."""
        ps_list = []
        for set_idx in range(len(batches[0][0])):
            vids = [batch[set_idx] for batch, target in batches]
            ps = pack_sequence(vids, False)
            ps_list.append(ps)
        # FIX: originally returned only the last `ps`, dropping the
        # other sets; return the full list as built.
        return ps_list, torch.tensor([target for _, target in batches])
/maruti/deepfake/utils.py
"""Early Keras-style training wrapper (superseded by maruti.torch.utils.Learner)."""
# from torch_lr_finder import LRFinder
from tqdm.auto import tqdm
from functools import partial
import torch
import time

# tqdm variant whose bar disappears when done (nested-loop friendly).
tqdm_nl = partial(tqdm, leave=False)


class Callback:
    # Placeholder; this older Learner never invokes callbacks.
    pass


def _limit_string(string, length):
    # Truncate to *length* chars, marking the cut with '..'.
    string = str(string)
    if length > len(string):
        return string
    else:
        return string[:length - 2] + '..'


def _time_rep(seconds):
    # MM:SS, or HH:MM:SS once the duration reaches an hour.
    if seconds >= 3600:
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    else:
        return time.strftime('%M:%S', time.gmtime(seconds))


class Learner:
    """Minimal train/validate loop around a torch model.

    Usage: Learner(model); .compile(optimizer, loss, ...); .fit(...).
    """

    def __init__(self, model):
        self.model = model

    def compile(self, optimizer, loss, lr_scheduler=None, device='cpu', metrics=None):
        # lr_scheduler/metrics are optional; absence is detected via hasattr
        # (lr_scheduler) or the empty list (metrics).
        self.optimizer = optimizer
        self.loss = loss
        self.device = device
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler
        if metrics is not None:
            self.metrics = metrics
        else:
            self.metrics = []

    def fit(self, epochs, train_loader, val_loader=None, accumulation_steps=1):
        """Train for *epochs*, printing a fixed-width table row per epoch.

        Returns {'best_state_dict', 'train': [...], 'val': [...]}.
        """
        # TODO: test for model on same device
        # NOTE(review): best_loss is initialized but never updated below, so
        # 'best_state_dict' is overwritten every epoch -- confirm intent.
        best_loss = float('inf')
        each_train_info = []
        each_val_info = []
        complete_info = {}
        header_string = ''
        headings = ['Train Loss', 'Val Loss']
        # At most the first three metrics are shown in the header.
        for i in range(len(self.metrics)):
            headings.append(self.metrics[i].__name__)
            if i == 2:
                break
        for heading in headings:
            header_string += _limit_string(heading, 12).center(12) + '|'
        header_string += 'Time'.center(12) + '|'
        print(header_string)
        # train
        self.optimizer.zero_grad()
        for epoch in tqdm_nl(range(epochs)):
            self.model.train()
            train_info = {}
            val_info = {}
            train_info['losses'] = []
            start_time = time.perf_counter()
            train_length = len(train_loader)
            for i, (inputs, targets) in tqdm_nl(enumerate(train_loader), total=train_length, desc='Training: '):
                inputs, targets = inputs.to(
                    self.device), targets.to(self.device)
                pred = self.model(inputs)
                loss = self.loss(pred, targets)
                train_info['losses'].append(loss)
                loss.backward()
                # Gradient accumulation: step/zero only every Nth batch.
                if (i + 1) % accumulation_steps == 0:
                    self.optimizer.step()
                    if hasattr(self, 'lr_scheduler'):
                        self.lr_scheduler.step()
                    self.optimizer.zero_grad()
            train_info['time'] = time.perf_counter() - start_time
            if val_loader is not None:
                val_info = self.validate(val_loader)
            info_string = ''

            def format_infos(x, length):
                # NOTE(review): *length* is accepted but ignored (12 is
                # hard-coded inside) -- confirm.
                return _limit_string(round(torch.stack(x).mean().item(), 2), 12).center(12)
            info_values = [format_infos(train_info['losses'], 12)]
            if 'losses' in val_info:
                info_values.append(format_infos(val_info['losses'], 12))
                if torch.stack(val_info['losses']).mean().item() < best_loss:
                    complete_info['best_state_dict'] = self.model.state_dict()
            else:
                if torch.stack(train_info['losses']).mean().item() < best_loss:
                    complete_info['best_state_dict'] = self.model.state_dict()
                info_values.append(str(None).center(12))
            for i, metric in enumerate(self.metrics):
                info_values.append(format_infos(
                    val_info['metrics'][metric.__name__], 12))
                if i == 2:
                    break
            total_time = train_info['time']
            if 'time' in val_info:
                total_time += val_info['time']
            info_values.append(_time_rep(total_time).center(12))
            tqdm.write('|'.join(info_values) + '|')
            each_train_info.append(train_info)
            each_val_info.append(val_info)
        complete_info = {**complete_info,
                         'train': each_train_info, 'val': each_val_info}
        return complete_info

    def validate(self, val_loader):
        """One no-grad pass over *val_loader*; returns losses/metrics/time."""
        information = {}
        information['losses'] = []
        information['metrics'] = {}
        for metric in self.metrics:
            information['metrics'][metric.__name__] = []
        self.model.eval()
        # NOTE(review): val_loss below is never used -- leftover?
        val_loss = torch.zeros(1)
        start_time = time.perf_counter()
        with torch.set_grad_enabled(False):
            for inputs, targets in tqdm_nl(val_loader, desc='Validating: '):
                inputs, targets = inputs.to(
                    self.device), targets.to(self.device)
                pred = self.model(inputs)
                loss = self.loss(pred, targets)
                information['losses'].append(loss)
                for metric in self.metrics:
                    information['metrics'][metric.__name__].append(
                        metric(pred, targets))
        total_time = time.perf_counter() - start_time
        information['time'] = total_time
        return information
/maruti/imports/__init__.py
from . import general from . import ml
/maruti/imports/general.py
import os import shutil from glob import glob from tqdm.auto import tqdm import itertools import random import time from functools import partial __all__ = ['time','random','os', 'shutil', 'glob', 'tqdm', 'itertools', 'partial']
/maruti/imports/ml.py
from .general import * from .general import __all__ as gen_all import torch import torch.nn as nn import torchvision.transforms as torch_transforms import torchvision import maruti.torch as mtorch import maruti.deepfake.dataset as mdata import maruti import maruti.deepfake as mfake import numpy as np import cv2 import maruti.vision as mvis import pandas as pd import torch.utils.data as tdata import matplotlib.pyplot as plt from torch.utils import data import torch.optim as optim device = 'cuda:0' if torch.cuda.is_available() else 'cpu' __all__ = gen_all + ['mfake','mvis', 'cv2', 'mdata', 'tdata', 'pd', 'device', 'plt', 'np', 'torch', 'nn', 'torch_transforms', 'torchvision', 'mtorch', 'maruti', 'data', 'optim']
/maruti/kaggle.py
"""Helpers for driving the Kaggle CLI: credentials and dataset versioning."""
import os
import subprocess
from pathlib import Path
import zipfile


def set_variables(credentials: 'lists[str,str]=[username, token]'):
    # Export API credentials so the `kaggle` CLI can authenticate.
    os.environ['KAGGLE_USERNAME'] = credentials[0]
    os.environ['KAGGLE_KEY'] = credentials[1]


def update_dataset(path, slug, message='new version', clean=False):
    """Download the current dataset version next to *path*, fetch its
    metadata, then push the folder as a new version.

    path    -- directory whose basename names the dataset folder
    slug    -- dataset slug under the hard-coded 'ankitsainiankit' account
    message -- version message for `kaggle datasets version`
    clean   -- NOTE(review): accepted but never used; confirm intent.
    """
    folder = os.path.basename(path)
    path = os.path.dirname(path)
    path = Path(path)
    # Kaggle CLI expects the old version nested one level deeper.
    os.mkdir(path / folder / folder)
    subprocess.call(['kaggle', 'datasets', 'download', '-p',
                     str(path / folder / folder), 'ankitsainiankit/' + slug,
                     '--unzip'])
    subprocess.call(['kaggle', 'datasets', 'metadata', '-p',
                     str(path / folder), 'ankitsainiankit/' + slug])
    # NOTE(review): unlike the calls above, '-p' here gets a Path rather
    # than str(...); subprocess accepts path-like args -- confirm on Windows.
    subprocess.call(['kaggle', 'datasets', 'version', '-m', message,
                     '-p', path / folder])
/maruti/sizes.py
"""Size helpers reporting directories, files and in-memory objects in MB."""
from sys import getsizeof
import os

# FIX: __all__ was assigned twice (once here, once at the bottom with the
# names reordered); a single assignment is kept.
__all__ = ['dir_size', 'file_size', 'var_size']


def byte_to_mb(size):
    """Convert a byte count to megabytes (MiB)."""
    return size / (1024 ** 2)


def dir_size(start_path='.'):
    """Return the total size in MB of all regular files under *start_path*."""
    total_size = 0
    for dirpath, dirnames, filenames in os.walk(start_path):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            # skip if it is symbolic link (avoids double counting / cycles)
            if not os.path.islink(fp):
                total_size += os.path.getsize(fp)
    return byte_to_mb(total_size)


def file_size(path):
    """Return the size in MB of the file at *path*."""
    file_stats = os.stat(path)
    return byte_to_mb(file_stats.st_size)


def var_size(var):
    """Return the shallow (sys.getsizeof) size of *var* in MB."""
    return byte_to_mb(getsizeof(var))
/maruti/torch/__init__.py
from . import utils from . import metrics from .utils import *
/maruti/torch/callback.py
"""Callback protocol for maruti.torch.utils.Learner, plus a recorder and a
TensorBoard logger. Any hook returning a truthy value stops training."""
import os
from datetime import datetime, timezone, timedelta
from torch.utils.tensorboard import SummaryWriter
from copy import deepcopy


class Callback:
    """No-op base class; override the hooks you need.

    A hook that returns True asks the Learner to stop training.
    """

    def on_epoch_end(self, losses, metrics, extras, epoch):
        """ extras-> dict ['time']['model'] """
        pass

    def on_epoch_start(self, epoch):
        pass

    def on_batch_start(self, epoch, batch):
        pass

    def on_batch_end(self, loss, metrics, extras, epoch, batch):
        pass

    def on_validation_start(self, epoch):
        pass

    def on_validation_end(self, loss, metrics, epoch):
        pass

    def on_min_val_start(self, epoch, batch):
        pass

    def on_min_val_end(self, loss, metrics, extras, epoch, batch):
        """extras['model']"""
        pass

    def on_train_start(self, epoch):
        pass


def Compose(callbacks):
    """Combine callbacks into one; each hook ORs the members' results so a
    single True (stop request) wins.

    NOTE(review): the `or` short-circuits, so once one callback returns
    True, later callbacks in the list are skipped for that hook -- confirm
    that is acceptable.
    """
    class NewCallback(Callback):
        def on_epoch_end(self, losses, metrics, extras, epoch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_epoch_end(
                    losses, metrics, extras, epoch)
            return isEnd

        def on_epoch_start(self, epoch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_epoch_start(epoch)
            return isEnd

        def on_batch_start(self, epoch, batch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_batch_start(epoch, batch)
            return isEnd

        def on_batch_end(self, loss, metrics, extras, epoch, batch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_batch_end(loss, metrics,
                                                       extras, epoch, batch)
            return isEnd

        def on_validation_start(self, epoch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_validation_start(epoch)
            return isEnd

        def on_validation_end(self, loss, metrics, epoch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_validation_end(
                    loss, metrics, epoch)
            return isEnd

        def on_min_val_start(self, epoch, batch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_min_val_start(
                    epoch, batch)
            return isEnd

        def on_min_val_end(self, loss, metrics, extras, epoch, batch):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_min_val_end(
                    loss, metrics, extras, epoch, batch)
            return isEnd

        def on_train_start(self, epochs):
            isEnd = False
            for callback in callbacks:
                isEnd = isEnd or callback.on_train_start(epochs)
            return isEnd
    return NewCallback()


class Recorder(Callback):
    """Records per-epoch summaries, batch losses, and the best model seen.

    Always installed by Learner.compile (composed with the user callback).
    """

    def __init__(self):
        self.best_model = None
        self.best_score = float('inf')
        self.summaries = []      # one dict per completed epoch
        self.others = []         # raw per-batch losses/metrics per epoch
        self.prevs = []          # snapshots from interrupted/previous runs
        # to monitor if the learner was stopped in between of an epoch
        self.epoch_started = False

    def on_train_start(self, epochs):
        self.new_state()

    def new_state(self):
        """Archive current run state into prevs and start clean."""
        # NOTE(review): sd has 'prevs' deleted but the *fresh* state_dict()
        # (which still contains prevs) is what gets appended -- confirm
        # whether `sd` was meant to be appended instead.
        sd = self.state_dict()
        del sd['prevs']
        self.prevs.append(self.state_dict())
        self.summaries = []
        self.others = []

    def on_epoch_start(self, epoch):
        # A new epoch while the previous one never finished means the run
        # was interrupted; archive before continuing.
        if self.epoch_started:
            self.new_state()
        self.summaries.append({})
        self.others.append({'train_losses': [], 'train_metrics': []})
        self.epoch_started = True

    def on_batch_end(self, train_loss, train_metrics, extras, epoch, batch):
        self.others[epoch]['train_losses'].append(train_loss)
        self.others[epoch]['train_metrics'].append(train_metrics)

    @property
    def last_summary(self):
        if self.summaries:
            return self.summaries[-1]
        raise Exception('no summaries exists')

    def on_min_val_end(self, loss, metrics, extras, epoch, batch):
        # Mid-epoch validation: extras['model'] is the module itself here.
        if loss < self.best_score:
            self.best_score = loss
            self.best_model = deepcopy(extras['model'].state_dict())

    def on_epoch_end(self, losses, metrics, extras, epoch):
        self.summaries[epoch]['train_loss'] = losses['train']
        self.summaries[epoch]['train_metrics'] = metrics['train']
        self.summaries[epoch]['time'] = extras['time']
        representative_loss = 'train'
        # for best model udpate
        if 'val' in losses:
            representative_loss = 'val'
            self.summaries[epoch]['val_loss'] = losses['val']
        if 'val' in metrics:
            self.summaries[epoch]['val_metrics'] = metrics['val']
        if losses[representative_loss] < self.best_score:
            self.best_score = losses[representative_loss]
            # extras['model'] is already a state_dict at epoch end
            # (see Learner.fit's epoch_extra_dict).
            self.best_model = deepcopy(extras['model'])
        self.epoch_started = False

    def state_dict(self):
        state = {}
        state['best_score'] = self.best_score
        state['best_model'] = self.best_model
        state['summaries'] = self.summaries
        state['others'] = self.others
        state['prevs'] = self.prevs
        return deepcopy(state)

    def load_state_dict(self, state):
        self.best_score = state['best_score']
        self.best_model = state['best_model']
        self.summaries = state['summaries']
        self.others = state['others']
        self.prevs = state['prevs']


class BoardLog(Callback):
    """TensorBoard logger: per-batch loss/metrics/lr, mid-epoch validation,
    and per-epoch loss/metric comparisons."""

    def __init__(self, comment='learn', path='runs'):
        self.path = path
        self.run = 0
        self.comment = comment
        self.batch_count = 0  # global step across all epochs

    def on_train_start(self, epochs):
        # One run directory per fit() call, timestamped in IST (UTC+5:30).
        india_timezone = timezone(timedelta(hours=5.5))
        time_str = datetime.now(tz=india_timezone).strftime('%d_%b_%H:%M:%S')
        path = os.path.join(self.path, self.comment, time_str)
        self.writer = SummaryWriter(log_dir=path, flush_secs=30)
        self.run += 1

    def on_batch_end(self, loss, metrics, extras, epoch, batch):
        lr_vals = {}
        for i, param in enumerate(extras['optimizer'].param_groups):
            lr_vals['lr_' + str(i)] = param['lr']
        self.writer.add_scalars(
            'batch', {'loss': loss, **metrics, **lr_vals},
            global_step=self.batch_count)
        self.batch_count += 1

    def on_min_val_end(self, loss, metrics, extras, epoch, batch):
        self.writer.add_scalars(
            'min_val', {'loss': loss, **metrics},
            global_step=self.batch_count)

    def on_epoch_end(self, losses, metrics, extras, epoch):
        self.writer.add_scalars('losses', losses, global_step=epoch)
        # NOTE(review): assumes metrics['val'] exists whenever
        # metrics['train'] has entries -- KeyError without a val_loader.
        for metric in metrics['train']:
            self.writer.add_scalars(metric,
                                    {'val': metrics['val'][metric],
                                     'train': metrics['train'][metric]},
                                    global_step=epoch)
/maruti/torch/utils.py
"""Freeze/unfreeze helpers and the callback-driven Learner training loop."""
# from torch_lr_finder import LRFinder
from tqdm.auto import tqdm
from functools import partial
import torch
import time
import numpy as np
from collections import Counter
from torchvision import transforms as torch_transforms
from . import callback as mcallback

# tqdm variant whose bar disappears when done (nested-loop friendly).
tqdm_nl = partial(tqdm, leave=False)

__all__ = ['unfreeze', 'freeze', 'unfreeze_layers', 'freeze_layers',
           'Learner']

# Standard ImageNet normalization, shared by the vision pipelines.
def_norm = torch_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])


def children_names(model):
    # Names of the model's immediate child modules.
    return set([child[0] for child in model.named_children()])


def apply_method(model, method):
    # method is 'unfreeze' (grads on) or anything else (grads off).
    for param in model.parameters():
        param.requires_grad = True if method == 'unfreeze' else False


def unfreeze(model):
    apply_method(model, 'unfreeze')


def freeze(model):
    apply_method(model, 'freeze')


def apply_recursively(model, layer_dict, method):
    """Walk *layer_dict* (nested names -> sub-dict or None) and toggle
    requires_grad; None means 'everything below this point'."""
    if layer_dict is None:
        apply_method(model, method)
    else:
        memo = set()
        for name, child in model.named_children():
            if name in layer_dict:
                memo.add(name)
                apply_recursively(child, layer_dict[name], method)
        # Leaf parameters addressed directly by name (not via a child).
        for name, parameter in model.named_parameters():
            if name in layer_dict and name not in memo:
                parameter.requires_grad = True if method == 'unfreeze' else False


def _dict_from_layers(layers):
    """Turn dotted layer names ('layer1.conv') into the nested dict shape
    apply_recursively expects; a None value marks 'whole subtree'."""
    if layers is None:
        return {None}
    splitted = [layer.split('.') for layer in layers]
    childs = [split[0] for split in splitted]
    child_count = Counter(childs)
    layer_dict = {child: {} for child in child_count}
    none_layers = set()
    for split in splitted:
        if len(split) == 1:
            none_layers.add(split[0])
        else:
            layer_dict[split[0]] = {**layer_dict[split[0]],
                                    **_dict_from_layers(split[1:]), }
    for none_layer in none_layers:
        layer_dict[none_layer] = None
    return layer_dict


def freeze_layers(model: 'torch.nn Module', layers: 'generator of layer names'):
    apply_recursively(model, _dict_from_layers(layers), 'freeze')


def unfreeze_layers(model: 'torch.nn Module', layers: 'generator of layer names'):
    apply_recursively(model, _dict_from_layers(layers), 'unfreeze')


def _limit_string(string, length):
    # Truncate to *length* chars, marking the cut with '..'.
    string = str(string)
    if length > len(string):
        return string
    else:
        return string[:length - 2] + '..'


def _time_rep(seconds):
    # MM:SS, or HH:MM:SS once the duration reaches an hour.
    if seconds >= 3600:
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    else:
        return time.strftime('%M:%S', time.gmtime(seconds))


class Learner:
    """Training loop with callbacks, mid-epoch validation, checkpointing
    and a fixed-width console report (one row per epoch)."""

    def __init__(self, model):
        self.model = model
        self.call_count = 0
        # Recorder is always composed into the callback chain.
        self.record = mcallback.Recorder()

    def compile(self, optimizer, loss, lr_scheduler=None, device='cpu', metrics=None, callback=mcallback.Callback(), max_metric_prints=3):
        # NOTE(review): the default `callback=mcallback.Callback()` is a
        # mutable default shared across Learners -- harmless for the no-op
        # base class, but confirm.
        self.optimizer = optimizer
        self.loss = loss
        self.metrics_plimit = max_metric_prints
        self.device = device
        self.cb = mcallback.Compose([callback, self.record])
        if lr_scheduler is not None:
            self.lr_scheduler = lr_scheduler
        if metrics is not None:
            self.metrics = metrics
        else:
            self.metrics = []

    def state_dict(self):
        """Serializable snapshot (record/model/optimizer[/lr_scheduler])."""
        if not hasattr(self, 'optimizer'):
            print('You first need to compile the learner')
            return
        state = {
            'record': self.record.state_dict(),
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
        }
        if hasattr(self, 'lr_scheduler'):
            state['lr_scheduler'] = self.lr_scheduler.state_dict()
        return state

    def load_state_dict(self, state):
        """Return True if everything wents write. Else raises error or returns False."""
        if not hasattr(self, 'optimizer'):
            print('Compile with earlier settings.')
            return False
        self.optimizer.load_state_dict(state['optimizer'])
        self.model.load_state_dict(state['model'])
        self.record.load_state_dict(state['record'])
        if hasattr(self, 'lr_scheduler'):
            self.lr_scheduler.load_state_dict(state['lr_scheduler'])
        else:
            if 'lr_scheduler' in state:
                print('lr_scheduler is missing. Recommended to compile with same settings.')
                return False
        return True

    @property
    def header_str(self):
        # Fixed-width header matching epoch_str's columns.
        header_string = ''
        # loss
        headings = ['Train Loss', 'Val Loss']
        # metrics (capped at metrics_plimit)
        for i in range(len(self.metrics)):
            headings.append(self.metrics[i].__name__)
            if i == self.metrics_plimit:
                break
        # time
        headings.append('Time')
        # getting together
        for heading in headings:
            header_string += _limit_string(heading, 12).center(12) + '|'
        return header_string

    @property
    def epoch_str(self):
        # Row for the most recent epoch, read from the Recorder.
        info = self.record.last_summary
        info_string = ''
        info_vals = [info['train_loss'],
                     info['val_loss'] if 'val_loss' in info else None]
        for i in range(len(self.metrics)):
            info_vals.append(info['val_metrics'][self.metrics[i].__name__])
            if i == self.metrics_plimit:
                break
        info_vals.append(_time_rep(info['time']))
        for info_val in info_vals:
            # NOTE(review): isinstance(..., int) never matches the float
            # losses, so the rounding branch looks dead -- confirm.
            if isinstance(info_val, int):
                info_val = round(info_val, 5)
            info_string += _limit_string(info_val, 12).center(12) + '|'
        return info_string

    @property
    def summary_str(self):
        total_time = sum(
            map(lambda x: x['time'], self.record.summaries))
        best_score = self.record.best_score
        return f'Total Time: {_time_rep(total_time)}, Best Score: {best_score}'

    def execute_metrics(self, ypred, y):
        metric_vals = {}
        for metric in self.metrics:
            # TODO: make better handling of non_scalar metrics
            metric_vals[metric.__name__] = metric(ypred, y).item()
        return metric_vals

    def fit(self, epochs, train_loader, val_loader=None, accumulation_steps=1, save_on_epoch='learn.pth', min_validations=0):
        """Train for *epochs*; any callback hook returning truthy aborts.

        min_validations > 0 triggers that many evenly spaced mid-epoch
        validation passes per epoch.
        """
        # TODO: test for model on same device
        # Save_on_epoch = None or False to stop save, else path to save
        min_validation_idx = set(np.linspace(
            0, len(train_loader), min_validations + 1, dtype=int)[1:])
        self.call_count += 1
        print(self.header_str)
        # train
        self.optimizer.zero_grad()
        if self.cb.on_train_start(epochs):
            return
        for epoch in tqdm_nl(range(epochs)):
            epoch_predictions = []
            epoch_targets = []
            if self.cb.on_epoch_start(epoch):
                return
            self.model.train()
            start_time = time.perf_counter()
            train_length = len(train_loader)
            for i, (inputs, targets) in tqdm_nl(enumerate(train_loader), total=train_length, desc='Training: '):
                if self.cb.on_batch_start(epoch, i):
                    return
                inputs, targets = inputs.to(
                    self.device), targets.to(self.device)
                pred = self.model(inputs)
                loss = self.loss(pred, targets)
                # logging
                epoch_predictions.append(pred.clone().detach())
                epoch_targets.append(targets.clone().detach())
                batch_metrics = self.execute_metrics(pred, targets)
                # loss.backward()
                # NOTE(review): backward() appears commented out above, so no
                # gradients are ever accumulated and optimizer.step() below is
                # a no-op -- confirm against version history before "fixing".
                if (i + 1) % accumulation_steps == 0:
                    self.optimizer.step()
                    if hasattr(self, 'lr_scheduler'):
                        self.lr_scheduler.step()
                    self.optimizer.zero_grad()
                batch_extras = {'optimizer': self.optimizer, }
                if hasattr(self, 'lr_scheduler'):
                    batch_extras['lr_scheduler'] = self.lr_scheduler
                if self.cb.on_batch_end(loss.item(), batch_metrics,
                                        batch_extras, epoch, i):
                    return
                if val_loader is not None:
                    if i in min_validation_idx:
                        # Free the batch before the validation pass.
                        del inputs
                        del targets
                        if self.cb.on_min_val_start(epoch, i):
                            return
                        min_val_loss, min_val_metrics = self._validate(
                            val_loader)
                        min_val_extras = {'model': self.model}
                        if self.cb.on_min_val_end(min_val_loss,
                                                  min_val_metrics,
                                                  min_val_extras, epoch, i):
                            return
                        self.model.train()
            # Epoch-level loss/metrics over all predictions at once.
            epoch_predictions = torch.cat(epoch_predictions)
            epoch_targets = torch.cat(epoch_targets)
            train_loss = self.loss(
                epoch_predictions, epoch_targets).clone().detach().item()
            train_metrics = self.execute_metrics(
                epoch_predictions, epoch_targets)
            losses = {'train': train_loss}
            metrics = {'train': train_metrics}
            if val_loader is not None:
                if self.cb.on_validation_start(epoch):
                    return
                val_loss, val_metrics = self._validate(val_loader)
                losses['val'] = val_loss
                metrics['val'] = val_metrics
                if self.cb.on_validation_end(val_loss, val_metrics, epoch):
                    return
            if save_on_epoch:
                torch.save(self.state_dict(), save_on_epoch)
            # Here 'model' is a state_dict (the Recorder relies on this).
            epoch_extra_dict = {'time': time.perf_counter() - start_time,
                                'model': self.model.state_dict(),
                                'optimizer': self.optimizer, }
            if hasattr(self, 'lr_scheduler'):
                epoch_extra_dict['lr_scheduler'] = self.lr_scheduler
            if self.cb.on_epoch_end(losses, metrics, epoch_extra_dict, epoch):
                return
            # this should after the epoch_end callback to be ready
            tqdm.write(self.epoch_str)
        print(self.summary_str)

    def predict(self, data_loader, with_targets=True):
        """No-grad forward pass; returns (preds, targets) or just preds."""
        self.model.eval()
        prediction_ar = []
        target_ar = []
        with torch.no_grad():
            if with_targets:
                for inputs, targets in tqdm_nl(data_loader, desc='Predicting: '):
                    inputs, targets = inputs.to(
                        self.device), targets.to(self.device)
                    pred = self.model(inputs)
                    prediction_ar.append(pred)
                    target_ar.append(targets)
                return torch.cat(prediction_ar), torch.cat(target_ar)
            for inputs in tqdm_nl(data_loader, desc='Predicting: '):
                inputs = inputs.to(self.device)
                pred = self.model(inputs)
                prediction_ar.append(pred)
            return torch.cat(prediction_ar)

    def validate(self, val_loader):
        # Public entry point (also counts as a "call" like fit does).
        self.call_count += 1
        return self._validate(val_loader)

    def _validate(self, val_loader):
        pred, target = self.predict(val_loader)
        loss = self.loss(pred, target).clone().detach().item()
        metrics = self.execute_metrics(pred, target)
        return loss, metrics
/maruti/utils.py
import json
import zipfile
import time
from tqdm.auto import tqdm

__all__ = ['rep_time', 'read_json', 'write_json', 'unzip']


def rep_time(seconds):
    """Format a duration in seconds as 'HH:MM:SS' ('MM:SS' under an hour).

    NOTE: time.gmtime wraps every 24 hours, so durations of a day or more
    display only the remainder (e.g. 25 h renders as '01:00:00').
    """
    if seconds >= 3600:
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    return time.strftime('%M:%S', time.gmtime(seconds))


def read_json(path):
    '''
    Read Json file as dict.
    '''
    # Binary mode lets json.load autodetect the UTF-8/16/32 encoding.
    with open(path, 'rb') as file:
        return json.load(file)


def write_json(dictionary, path):
    """
    Write dict as a json file
    """
    with open(path, 'w') as fp:
        json.dump(dictionary, fp)


def unzip(zip_path, path='.'):
    """Extract every member of *zip_path* into *path* with a progress bar.

    Extraction is deliberately best-effort: a member that raises a zip
    error is skipped so one corrupt entry does not abort the archive.
    """
    with zipfile.ZipFile(zip_path) as zf:
        for member in tqdm(zf.infolist(), desc='Extracting '):
            try:
                zf.extract(member, path)
            except zipfile.BadZipFile:
                # zipfile.error is the legacy alias of BadZipFile; the
                # unused `as e` binding was dropped.
                pass
/maruti/vision/__init__.py
from . import image from . import video from .image import make_grid from .video import *
/maruti/vision/image.py
import numpy as np
import cv2
from functools import lru_cache
from functools import partial
from os.path import join
import os
from PIL import Image
import torch

__all__ = ['brightness_score', 'adjust_brightness',
           'crop_around_point', 'make_grid']

# Directory of bundled data files shipped next to this module.
DATA_PATH = join(os.path.dirname(__file__), 'data')


def brightness_score(img):
    '''
    Mean intensity over all pixels and channels, normalized by 255.

    @params: img - an array with shape (w/h, w/h, 3)
    '''
    cols, rows = img.shape[:2]
    return np.sum(img) / (255 * cols * rows)


def adjust_brightness(img, min_brightness):
    '''
    Increase or decrease brightness so the image matches *min_brightness*.

    @params: img - an array with shape (w,h,3)
    '''
    brightness = brightness_score(img)
    ratio = brightness / min_brightness
    return cv2.convertScaleAbs(img, alpha=1 / ratio, beta=0)


def crop_around_point(img, point, size):
    '''
    crop a rectangle with size centered at point
    @params: size (h,w)
    @params: point (x,y)
    '''
    h, w = img.shape[:2]
    n_h, n_w = size
    # Upscale (keeping aspect ratio) so the crop always fits the image.
    r_h, r_w = h, w
    if h < n_h:
        r_h = n_h
    if w < n_w:
        r_w = n_w
    h_ratio = r_h / h
    w_ratio = r_w / w
    if h_ratio > w_ratio:
        r_w = int(r_w * h_ratio / w_ratio)
    elif w_ratio > h_ratio:
        r_h = int(r_h * w_ratio / h_ratio)
    pre_w, post_w = n_w // 2, n_w - (n_w // 2)
    pre_h, post_h = n_h // 2, n_h - (n_h // 2)
    img = cv2.resize(img, (r_w, r_h))
    midx, midy = point
    # Clamp the window to the image border on each axis.
    startX, startY, endX, endY = 0, 0, 0, 0
    if midx - pre_w < 0:
        startX, endX = 0, n_w
    elif midx + post_w - 1 >= r_w:
        startX, endX = r_w - n_w, r_w
    else:
        startX, endX = midx - pre_w, midx + post_w
    if midy - pre_h < 0:
        startY, endY = 0, n_h
    elif midy + post_h - 1 >= r_h:
        startY, endY = r_h - n_h, r_h
    else:
        startY, endY = midy - pre_h, midy + post_h
    return img[startY:endY, startX:endX]


def _unNormalize(img, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    # Undo ImageNet-style normalization and return a uint8 HWC array.
    # (tuple defaults replace the original mutable list defaults)
    mt = torch.FloatTensor(mean).view(1, 1, 3)
    st = torch.FloatTensor(std).view(1, 1, 3)
    return (((img * st) + mt) * 255).int().numpy().astype(np.uint8)


def make_grid(imgs: '(n,h,w,c) tensor or list of (h,w,c) tensor', cols=8):
    "return numpy array of size (h,w,c) easy for plotting"
    count = len(imgs)
    rows = (count + cols - 1) // cols
    # Heuristic: if no value exceeds 5 the images are normalized tensors.
    if not (imgs[0] > 5).any():
        imgs = [_unNormalize(img) for img in imgs]
    h, w = imgs[0].shape[:-1]
    # BUG FIX: grid width must scale with image *width* and grid height
    # with image *height*; the original swapped h and w (and used h for
    # the x offset), so only square tiles produced a correct grid.
    new_img_w = w * cols
    new_img_h = h * rows
    new_img = Image.new('RGB', (new_img_w, new_img_h))
    for i in range(len(imgs)):
        img = Image.fromarray(np.array(imgs[i]).astype(np.uint8))
        x = w * (i % cols)
        y = h * (i // cols)
        new_img.paste(img, (x, y))
    return np.array(new_img)
/maruti/vision/video.py
import cv2
import numpy as np
from .. import vision as mvis
from facenet_pytorch import MTCNN
import torch
from PIL import Image
from collections import defaultdict

# Run MTCNN face detection on the GPU when one is available.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'


class Video(cv2.VideoCapture):
    """cv2.VideoCapture that works as a context manager (auto-release)."""

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.release()


def vid_info(path):
    "return frame_count, (h, w)"
    cap = cv2.VideoCapture(path)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    cap.release()  # BUG FIX: the capture handle was never released
    return frame_count, (h, w)


def get_frames(cap: 'cv2.VideoCapture object', frames: 'iterable<int>',
               code='rgb', start_frame=0):
    """Yield the frames whose indices are in *frames* (generator).

    Frame numbers out of the scope will be ignored.  The capture is
    released once the last requested frame has been produced.  Raises if
    the video reports zero frames (corrupt file).
    """
    curr_index = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
    if curr_index != start_frame:
        # BUG FIX: seek to the requested start_frame.  The original
        # re-set the position to curr_index (a no-op), so start_frame
        # was silently ignored.
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        curr_index = start_frame
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frames = set(frames)
    last_frame = max(frames)
    if frame_count == 0:
        raise Exception('The video is corrupt. Closing')
    for i in range(curr_index, frame_count):
        _ = cap.grab()  # grab() is cheap; decode only the frames we need
        if i in frames:
            _, frame = cap.retrieve()
            if code == 'rgb':
                yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            else:
                yield frame
        if i == last_frame:
            cap.release()
            break
    cap.release()


def get_frames_from_path(path: 'str or posix', frames: 'iterable<int>',
                         code='rgb'):
    """Open *path* and return a get_frames generator over it."""
    cap = cv2.VideoCapture(str(path))
    return get_frames(cap, frames, code)


def crop_face(img, points, size: "(h,w)" = None):
    """Crop bbox *points* = (x1, y1, x2, y2), optionally resized to *size*."""
    if size:
        size = size[1], size[0]  # cv2 resize needs (w,h)
    face = img[points[1]:points[3], points[0]:points[2]]
    if size is not None:
        face = cv2.resize(face, size,)
    return face


def bbox_from_det(det_list):
    """Scale half-resolution detections to full-res int bboxes.

    Entries that are None reuse the most recent good detection (or a
    default 224x224 box before the first detection).
    """
    working_det = np.array([[0, 0, 224, 224]])
    bbox = []
    for detection in det_list:
        if detection is None:
            bbox.append(working_det.astype(int) * 2)
        else:
            bbox.append(detection.astype(int) * 2)
            working_det = detection.copy()
    return bbox


def _face_from_det(frame_idx, detect_idx, frames, det_list, f_h, f_w,
                   margin=30, size=(224, 224), mtcnn=None):
    """Crop ONE face per frame from sparse half-resolution detections."""
    start = frame_idx[0]
    n_h, n_w = f_h // 2, f_w // 2
    full_det_list = [None] * len(frame_idx)
    # first frame should be correct so it can compensate upcoming ones
    if det_list[0] is None:
        _detection = mtcnn.detect(frames[0])[0]
        if _detection is not None:
            det_list[0] = _detection / 2
    for i, box in zip(detect_idx, det_list):
        full_det_list[i - start] = box
    bbox = bbox_from_det(full_det_list)
    # Fallback: a centered 224x224 box, used until a crop succeeds.
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        best_pred = box[0]
        best_pred[[0, 1]] -= margin // 2
        best_pred[[2, 3]] += (margin + 1) // 2
        try:
            cropped_faces = crop_face(frame, best_pred, size=size)
            working_pred = best_pred
        except Exception:  # narrowed from bare except
            cropped_faces = crop_face(frame, working_pred, size=size)
        faces.append(cropped_faces)
    return faces


def non_overlapping_ranges(rngs):
    """Merge [start, end) ranges into sorted, non-overlapping ranges."""
    all_idx = set()
    for rng in rngs:
        for i in range(rng[0], rng[1]):
            all_idx.add(i)
    min_i = min(all_idx)
    max_i = max(all_idx)
    non_overlapping_rngs = []
    last = min_i
    start = min_i
    i = min_i + 1
    while i < max_i + 1:
        if i in all_idx:
            last = i
            i += 1
            continue
        else:
            # gap found: close the current run and skip to the next index
            non_overlapping_rngs.append([start, last + 1])
            while i not in all_idx:
                i += 1
            start = i
            last = i
    non_overlapping_rngs.append([start, last + 1])
    return non_overlapping_rngs


def get_face_frames2(path, frame_rngs, jumps=4, margin=30, mtcnn=None,
                     size: "(h,w)" = (224, 224)):
    """One face crop per frame for each range in *frame_rngs*.

    Detection runs on half-resolution frames every *jumps* frames only.
    """
    # for height and width
    cap = cv2.VideoCapture(path)
    f_h, f_w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    n_h, n_w = f_h // 2, f_w // 2
    cap.release()
    non_overlapping_rngs = non_overlapping_ranges(frame_rngs)
    idx2face = defaultdict(lambda: None)
    idx2frame = defaultdict(lambda: None)
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    # getting video frames in one shot
    all_frames_idx = []
    for rng in non_overlapping_rngs:
        all_frames_idx.extend(range(rng[0], rng[1]))
    vid_frames = list(get_frames_from_path(path, all_frames_idx))
    for i, frame in zip(all_frames_idx, vid_frames):
        idx2frame[i] = frame
    # getting detection in one shot
    all_detect_idx = []
    for frame_rng in non_overlapping_rngs:
        all_detect_idx.extend(range(frame_rng[0], frame_rng[1], jumps))
    all_detect_small_frames = [cv2.resize(frame, (n_w, n_h))
                               for i, frame in zip(all_frames_idx, vid_frames)
                               if i in all_detect_idx]
    det, conf = mtcnn.detect(all_detect_small_frames)
    idx2det = defaultdict(lambda: None)
    for i, box in zip(all_detect_idx, det):  # renamed loop var (was `det`)
        idx2det[i] = box
    # face crop for each non-overlapping range
    for frame_rng in non_overlapping_rngs:
        start, end = frame_rng
        frame_idx = list(range(start, end))
        detect_idx = list(range(start, end, jumps))
        frames = [idx2frame[i] for i in frame_idx]
        det_list = [idx2det[i] for i in detect_idx]
        faces = _face_from_det(
            frame_idx, detect_idx, frames, det_list, f_h, f_w,
            margin=margin, size=size, mtcnn=mtcnn)
        for i, face in zip(frame_idx, faces):
            idx2face[i] = face
    # distribution to each range
    rng_faces = []
    for rng in frame_rngs:
        curr_rng_faces = []
        for i in range(rng[0], rng[1]):
            curr_rng_faces.append(idx2face[i])
        rng_faces.append(curr_rng_faces)
    return rng_faces


def crop(frame, bb):
    """Crop bbox bb = (x1, y1, x2, y2) out of *frame*."""
    return frame[bb[1]:bb[3], bb[0]:bb[2]]


def toint(bb):
    """Cast every bbox coordinate to int."""
    return [int(i) for i in bb]


def apply_margin(bb, margin, size):
    """Grow bbox by *margin*, clamped to the image bounds *size*."""
    bb = [max(0, bb[0] - margin),
          max(0, bb[1] - margin),
          min(size[0] - 1, bb[2] + margin),
          min(size[1] - 1, bb[3] + margin)]
    return bb


def expand_detection(detections, idx, length):
    """Spread sparse per-index detections over *length* frames by
    carrying the last seen detection forward (None becomes [])."""
    assert (len(detections) == len(
        idx)), f'length of detection ({len(detections)}) and indices ({len(idx)}) must be same'
    j = 0
    last = detections[j] if detections[j] is not None else []
    final_detections = []
    for i in range(length):
        if i in idx:
            last = detections[idx.index(i)]
            if last is None:
                last = []
        final_detections.append(last)
    return final_detections


def get_all_faces(path: 'str', detections=32, mtcnn=None, margin=20):
    """All face crops per frame, detecting on *detections* evenly spaced
    frames and carrying boxes forward in between."""
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    cap = cv2.VideoCapture(path)
    frames = []
    next_frame = True
    while next_frame:
        next_frame, fr = cap.read()
        if next_frame:
            frames.append(cv2.cvtColor(fr, cv2.COLOR_BGR2RGB))
    np_det_idx = np.linspace(0, len(frames), detections,
                             endpoint=False, dtype=int)
    detection_idx = list(map(int, np_det_idx))
    detection_frames = [frame for i, frame in enumerate(
        frames) if i in detection_idx]
    detection = mtcnn.detect(detection_frames)
    detection = detection[0]
    del detection_frames
    detection = expand_detection(detection, detection_idx, len(frames))
    faces = []
    for i, bboxes in enumerate(detection):
        faces.append([])
        for bbox in bboxes:
            bbox = apply_margin(bbox, margin, frames[0].shape[:2])
            faces[-1].append(crop(frames[i], toint(bbox)))
    return faces


def get_face_frames(path, frame_idx, margin=30, mtcnn=None,
                    size: "(h,w)" = (224, 224),):
    """
    One face crop for every index in *frame_idx*.

    Consumes more RAM as it stores all the frames in full resolution.
    Try to detect in small batches if needed.
    """
    # for height and width
    cap = cv2.VideoCapture(path)
    f_h, f_w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    cap.release()
    n_h, n_w = f_h // 2, f_w // 2
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    frames = list(get_frames_from_path(path, frame_idx))
    small_faces = [cv2.resize(frame, (n_w, n_h)) for frame in frames]
    det, conf = mtcnn.detect(small_faces)
    det_list = list(map(lambda x: x, det))
    bbox = bbox_from_det(det_list)
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        best_pred = box[0]
        best_pred[[0, 1]] -= margin // 2
        best_pred[[2, 3]] += (margin + 1) // 2
        try:
            cropped_faces = crop_face(frame, best_pred, size=size)
            working_pred = best_pred
        except Exception:  # narrowed from bare except
            cropped_faces = crop_face(frame, working_pred, size=size)
        faces.append(cropped_faces)
    return faces


def get_faces_frames(path, frame_idx, margin=30, mtcnn=None,
                     size: "(h,w)" = (224, 224),):
    """
    ALL face crops for every index in *frame_idx*.

    Consumes more RAM as it stores all the frames in full resolution.
    Try to detect in small batches if needed.
    """
    # for height and width
    _, (f_h, f_w) = vid_info(path)
    n_h, n_w = f_h // 2, f_w // 2
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    frames = list(get_frames_from_path(path, frame_idx))
    small_faces = [cv2.resize(frame, (n_w, n_h)) for frame in frames]
    det, conf = mtcnn.detect(small_faces)
    det_list = list(map(lambda x: x, det))
    if det_list[0] is None:
        _detection = mtcnn.detect(frames[0])[0]
        if _detection is not None:
            det_list[0] = _detection / 2
    bbox = bbox_from_det(det_list)
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        all_faces = []
        for face_det in box:
            best_pred = face_det
            best_pred[[0, 1]] -= margin // 2
            best_pred[[2, 3]] += (margin + 1) // 2
            try:
                cropped_faces = crop_face(frame, best_pred, size=size)
                working_pred = best_pred
            except Exception:  # narrowed from bare except
                cropped_faces = crop_face(frame, working_pred, size=size)
            all_faces.append(cropped_faces)
        faces.append(all_faces)
    return faces


def _faces_from_det(frame_idx, detect_idx, frames, det_list, f_h, f_w,
                    margin=30, size=(224, 224), mtcnn=None):
    """Crop ALL faces per frame from sparse half-resolution detections.

    BUG FIX: this was a second `def _face_from_det`, which silently
    shadowed the single-face helper above (breaking get_face_frames2) and
    left get_faces_frames2's call to `_faces_from_det` a NameError.
    """
    start = frame_idx[0]
    n_h, n_w = f_h // 2, f_w // 2
    full_det_list = [None] * len(frame_idx)
    # first frame should be correct so it can compensate upcoming ones
    if det_list[0] is None:
        _detection = mtcnn.detect(frames[0])[0]
        if _detection is not None:
            det_list[0] = _detection / 2
    for i, box in zip(detect_idx, det_list):
        full_det_list[i - start] = box
    bbox = bbox_from_det(full_det_list)
    working_pred = np.array([(f_h // 2) - 112, (f_w // 2) - 112,
                             (f_h // 2) + 112, (f_w // 2) + 112])
    faces = []
    for frame, box in zip(frames, bbox):
        all_faces = []
        for face_det in box:
            best_pred = face_det
            best_pred[[0, 1]] -= margin // 2
            best_pred[[2, 3]] += (margin + 1) // 2
            try:
                cropped_faces = crop_face(frame, best_pred, size=size)
                working_pred = best_pred
            except Exception:  # narrowed from bare except
                cropped_faces = crop_face(frame, working_pred, size=size)
            all_faces.append(cropped_faces)
        faces.append(all_faces)
    return faces


def get_faces_frames2(path, frame_rngs, jumps=4, margin=30, mtcnn=None,
                      size: "(h,w)" = (224, 224)):
    """ALL face crops per frame for each range in *frame_rngs*.

    Detection runs on half-resolution frames every *jumps* frames only.
    """
    # for height and width
    cap = cv2.VideoCapture(path)
    f_h, f_w = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
        cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    n_h, n_w = f_h // 2, f_w // 2
    cap.release()
    non_overlapping_rngs = non_overlapping_ranges(frame_rngs)
    idx2face = defaultdict(lambda: None)
    idx2frame = defaultdict(lambda: None)
    if mtcnn is None:
        mtcnn = MTCNN(select_largest=False, device=device,)
    # getting video frames in one shot
    all_frames_idx = []
    for rng in non_overlapping_rngs:
        all_frames_idx.extend(range(rng[0], rng[1]))
    vid_frames = list(get_frames_from_path(path, all_frames_idx))
    for i, frame in zip(all_frames_idx, vid_frames):
        idx2frame[i] = frame
    # getting detection in one shot
    all_detect_idx = []
    for frame_rng in non_overlapping_rngs:
        all_detect_idx.extend(range(frame_rng[0], frame_rng[1], jumps))
    all_detect_small_frames = [cv2.resize(frame, (n_w, n_h))
                               for i, frame in zip(all_frames_idx, vid_frames)
                               if i in all_detect_idx]
    det, conf = mtcnn.detect(all_detect_small_frames)
    idx2det = defaultdict(lambda: None)
    for i, box in zip(all_detect_idx, det):  # renamed loop var (was `det`)
        idx2det[i] = box
    # face crop for each non-overlapping range
    for frame_rng in non_overlapping_rngs:
        start, end = frame_rng
        frame_idx = list(range(start, end))
        detect_idx = list(range(start, end, jumps))
        frames = [idx2frame[i] for i in frame_idx]
        det_list = [idx2det[i] for i in detect_idx]
        faces = _faces_from_det(
            frame_idx, detect_idx, frames, det_list, f_h, f_w,
            margin=margin, size=size, mtcnn=mtcnn)
        for i, face in zip(frame_idx, faces):
            idx2face[i] = face
    # distribution to each range
    rng_faces = []
    for rng in frame_rngs:
        curr_rng_faces = []
        for i in range(rng[0], rng[1]):
            curr_rng_faces.append(idx2face[i])
        rng_faces.append(curr_rng_faces)
    return rng_faces
/setup.py
import setuptools

# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="maruti",
    version="1.3.4",
    author="Ankit Saini",
    author_email="ankitsaini100205@gmail.com",
    description="Maruti Library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ankitsainidev/maruti",
    # NOTE(review): download_url points at the v1.3 tag while version is
    # 1.3.4 — confirm the tag is intentional.
    download_url='https://github.com/ankitsainidev/maruti/archive/v1.3.tar.gz',
    # Sub-packages shipped with the distribution.
    packages=['maruti', 'maruti.vision', 'maruti.deepfake',
              'maruti.torch', 'maruti.imports'],
    package_dir={'maruti': 'maruti'},
    # Bundle the deepfake data files (two directory levels deep).
    package_data={'maruti': ['deepfake/data/*/*']},
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.5',
    install_requires=['tqdm==4.40.2', 'opencv-python', 'facenet_pytorch']
)
/tests/test_sizes.py
import unittest
import tempfile
import os
from maruti import sizes


class DeepfakeTest(unittest.TestCase):
    """Smoke tests for the maruti.sizes helpers."""

    def test_byte_to_mb(self):
        # 1 MiB is exactly 1024 * 1024 bytes.
        self.assertEqual(sizes.byte_to_mb(1024 * 1024), 1)
        self.assertAlmostEqual(sizes.byte_to_mb(1024),
                               0.0009765624, delta=1e-8)

    def test_sizes(self):
        # `tmp_dir` (was `dir`) no longer shadows the builtin.
        with tempfile.TemporaryDirectory() as tmp_dir:
            # dir test
            sizes.dir_size(tmp_dir)
            sizes.dir_size()
            # file test
            file_path = os.path.join(tmp_dir, 'test_file.txt')
            with open(file_path, 'w') as f:
                f.write("It's a test")
            sizes.file_size(file_path)
            # var test
            sizes.var_size(tmp_dir)
/tests/test_utils.py
import maruti
import unittest
import tempfile
from maruti import utils
import os


class UtilsTests(unittest.TestCase):
    """Round-trip test for utils.write_json / utils.read_json."""

    def test_read_write_json(self):
        # `tmp_dir` (was `dir`) no longer shadows the builtin.
        with tempfile.TemporaryDirectory() as tmp_dir:
            # creating dictionary
            sample = {'h': 3, 'd': {'j': 4}}
            path = os.path.join(tmp_dir, 'test.json')
            # writing to file
            utils.write_json(sample, path)
            # reading same file
            sample_read = utils.read_json(path)
            self.assertEqual(sample, sample_read)
/tests/torch/test_utils.py
import unittest
import tempfile
import os
import torchvision
import torch
from maruti.torch import utils


class TorchUtilsTest(unittest.TestCase):
    """Tests for the maruti.torch.utils freeze/unfreeze helpers."""

    def setUp(self):
        # Untrained resnet18 keeps setup fast (no weight download).
        self.model = torchvision.models.resnet18(False)

    def tearDown(self):
        self.model = None

    def test_freeze_unfreeze(self):
        utils.freeze(self.model)
        for param in self.model.parameters():
            self.assertFalse(param.requires_grad)
        utils.unfreeze(self.model)
        for param in self.model.parameters():
            self.assertTrue(param.requires_grad)

    def test_layer_freeze_unfreeze(self):
        layers = ['fc.weight', 'layer1.0', 'layer2', 'layer1', 'layer3.0']
        utils.freeze_layers(self.model, layers)
        # Every parameter under a listed prefix must be frozen, all
        # others must still require grad.
        for name, layer in self.model.named_parameters():
            tested = False
            for to_freeze in layers:
                if name.startswith(to_freeze):
                    tested = True
                    self.assertFalse(layer.requires_grad)
            if not tested:
                self.assertTrue(layer.requires_grad)
        utils.unfreeze_layers(self.model, layers)
        for param in self.model.parameters():
            self.assertTrue(param.requires_grad)

    def test_children_names(self):
        names = utils.children_names(self.model)
        layers = {'fc', 'layer1', 'layer2', 'layer3', 'layer4',
                  'conv1', 'bn1', 'relu', 'maxpool', 'avgpool'}
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(len(names), len(layers))
        for name in names:
            self.assertTrue(name in layers)
/tests/vision/test_image.py
import unittest
import cv2
from cv2 import dnn_Net
from maruti.vision import image
import os

# Directory holding the image fixtures used by these tests.
TEST_DATA_PATH = 'test_data'


class ImageTests(unittest.TestCase):
    """Unit tests for the maruti.vision.image helpers."""

    def setUp(self):
        # Load the shared fixture image once per test.
        self.img_path = os.path.join(TEST_DATA_PATH, 'img1.jpeg')
        self.img = cv2.imread(self.img_path)

    def test_create_net(self):
        # The factory must hand back an OpenCV DNN network object.
        net = image.create_net()
        self.assertIsInstance(net, dnn_Net)

    def test_brightness_score(self):
        score = image.brightness_score(self.img)
        self.assertAlmostEqual(score, 1.76, delta=1e-2)

    def test_adjust_brightness(self):
        before = image.brightness_score(self.img)
        brightened = image.adjust_brightness(self.img, 2 * before)
        self.assertGreaterEqual(image.brightness_score(brightened), before)

    def test_crop_around_point(self):
        height, width = self.img.shape[:2]
        anchors = [(0, 0), (height - 1, width - 1), (height // 2, width // 2)]
        crop_sizes = [(224, 224), (160, 160), (3000, 4000)]
        # Every anchor/size combination must yield a crop of exactly
        # the requested size.
        for anchor in anchors:
            for crop_size in crop_sizes:
                cropped = image.crop_around_point(self.img, anchor, crop_size)
                self.assertEqual(crop_size, cropped.shape[:2])

    def test_get_face_center(self):
        original_score = image.brightness_score(self.img)
        (x, y), reported_score = image.get_face_center(self.img)
        # Face detection must report the unchanged brightness.
        self.assertEqual(original_score, reported_score)

    def test_detect_sized_rescaled_face(self):
        requested_sizes = [(224, 224), (160, 160), (3000, 4000)]
        for requested in reversed(requested_sizes):
            face = image.detect_sized_rescaled_face(
                self.img, requested, rescale_factor=2)
            self.assertEqual(requested, face.shape[:2])
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
RainGod6/SDET11-LY
refs/heads/master
{"/test_appium_page_object/page/app.py": ["/test_appium_page_object/page/main.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/main.py": ["/test_appium_page_object/page/apply_etc_card/apply_credit_card.py", "/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/page/apply_etc_card/apply_credit_card.py": ["/test_appium_page_object/page/base_page.py"], "/test_appium_page_object/testcase/test_apply_credit_card.py": ["/test_appium_page_object/page/app.py"]}
└── ├── leetcode │ └── Solution.py ├── test_appium │ ├── YMM_APP │ │ ├── TestYmm.py │ │ └── TestYmmPY.py │ ├── hcb_app │ │ ├── test_hcb_demo.py │ │ └── test_hcb_home.py │ ├── testApiDemo.py │ └── test_xueqiu.py ├── test_appium_page_object │ ├── page │ │ ├── app.py │ │ ├── apply_etc_card │ │ │ └── apply_credit_card.py │ │ ├── base_page.py │ │ └── main.py │ └── testcase │ └── test_apply_credit_card.py ├── test_yaml │ └── test_yaml.py └── unit └── test_unit.py
/leetcode/Solution.py
"""Two Sum (LeetCode problem 1).

Given an integer array *nums* and an integer *target*, return the indices
of the two DISTINCT elements whose sum equals *target*.  Each input is
assumed to have at most one answer, and the same element may not be used
twice.

Example:
    nums = [2, 7, 11, 15], target = 9
    nums[0] + nums[1] == 9, so the answer is [0, 1]

Source: https://leetcode-cn.com/problems/two-sum
"""
from typing import List


class Solution(object):
    def twoSum(self, nums, target):
        """Return [i, j] with i < j and nums[i] + nums[j] == target, or [].

        :type nums: List[int]
        :type target: int
        :rtype: List[int]

        Single O(n) pass with a value -> index map.  This also fixes the
        original O(n^2) version, whose inner loop started at index 1
        instead of i + 1 and could therefore pair an element with itself
        (e.g. nums=[3, 2, 4], target=4 returned [1, 1]).
        """
        seen = {}  # value -> first index where it appeared
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return [seen[complement], i]
            seen[num] = i
        return []


if __name__ == "__main__":
    s = Solution()
    nums = [3, 2, 4]
    b = 6
    result = s.twoSum(nums, b)
    print(result)
/test_appium/YMM_APP/TestYmm.py
# This sample code uses the Appium python client # pip install Appium-Python-Client # Then you can paste this into a file and simply run with Python from appium import webdriver from appium.webdriver.common.mobileby import MobileBy caps = {} caps["platformName"] = "android" caps["deviceName"] = "xiaomi5" caps["appPackage"] = "com.xiwei.logistics" caps["appActivity"] = "com.xiwei.logistics.carrier.ui.CarrierMainActivity" # caps["noReset"] = True driver = webdriver.Remote("http://localhost:4723/wd/hub", caps) # 同意协议 el1 = driver.find_element_by_id("com.xiwei.logistics:id/dialog_btn_right") el1.click() # 同意NFC授权,需要等待20s driver.implicitly_wait(25) el2 = driver.find_element(MobileBy.ID, "android:id/button1") el2.click() # 点击知道了弹框 el3 = driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/buttons_layout") # el3 = driver.find_element(MobileBy.XPATH, "//*[@text='知道了' and contains(@resource-id,'com.xiwei.logistics:id/buttons_layout')]") el3.click() # 关闭广告弹窗x按钮 driver.implicitly_wait(15) el4 = driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/iv_close") el4.click()
/test_appium/YMM_APP/TestYmmPY.py
# This sample code uses the Appium python client
# pip install Appium-Python-Client
# Then you can paste this into a file and simply run with Python
from time import sleep

from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.common.touch_action import TouchAction
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait


class TestYmmAPP:
    """UI tests driving the app's ETC flows through Appium."""

    def setup(self):
        # Launch the app and click through the startup dialogs.
        caps = {}
        caps["platformName"] = "android"
        caps["deviceName"] = "xiaomi5"
        caps["appPackage"] = "com.xiwei.logistics"
        caps["appActivity"] = "com.xiwei.logistics.carrier.ui.CarrierMainActivity"
        # caps["noReset"] = True
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(15)  # global implicit wait
        # Accept the user agreement.
        self.driver.find_element_by_id("com.xiwei.logistics:id/dialog_btn_right").click()
        # Explicit wait: this dialog renders slowly, allow up to 20 s.
        WebDriverWait(self.driver, 20).until(expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
        # Accept the NFC authorization.
        self.driver.find_element(MobileBy.ID, "android:id/button1").click()
        # Dismiss the "got it" popup.
        self.driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/buttons_layout").click()
        # Close the advertisement popup via its "x" button.
        WebDriverWait(self.driver, 10).until(
            expected_conditions.element_to_be_clickable((MobileBy.ID, "com.xiwei.logistics:id/iv_close")))
        self.driver.find_element(MobileBy.ID, "com.xiwei.logistics:id/iv_close").click()

    def test_etchome(self):
        # page_source returns the page's XML structure
        # print(self.driver.page_source)
        tab = "// *[@text='服务']/../../.."  # parent node
        tab1 = "//*[contains(@resource-id,'ll_tab_container')]"  # fuzzy match: using contains
        tab2 = "//*[contains(@resource-id,'tv_tab') and @text='服务']"  # combined predicates with `and`
        # Tap the services tab to open the services home page.
        self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_tab') and @text='服务']").click()
        # Swipe the screen up and back down.
        action = TouchAction(self.driver)
        window_rect = self.driver.get_window_rect()
        print(window_rect)
        width = window_rect['width']
        height = window_rect['height']
        for i in range(3):
            action.press(x=width * 1 / 2, y=height * 5 / 6).wait(2000).move_to(x=width * 1 / 2, y=height * 1 / 6).release().perform()
        # Swipe back to the original position.
        for i in range(3):
            action.press(x=width * 1 / 2, y=height * 1 / 6).wait(2000).move_to(x=width * 1 / 2, y=height * 5 / 6).release().perform()
        etc_tab = "//*[@text='ETC']"
        self.driver.find_element(MobileBy.XPATH, etc_tab).click()
        WebDriverWait(self.driver, 15).until(
            expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
        # Accept the NFC authorization.
        self.driver.find_element(MobileBy.ID, "android:id/button1").click()
        quick_appaly_image = "//*[contains(@resource-id,'ll_online_open_card')]"
        # NOTE(review): `assert not (...) != ...` is a double negative,
        # equivalent to asserting equality — confirm intent.
        assert not (self.driver.find_element(MobileBy.XPATH, quick_appaly_image).get_attribute(
            "resourceId")) != "com.wlqq.phantom.plugin.etc:id/ll_online_open_card"

    def test_apply_card(self):
        self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_tab') and @text='服务']").click()
        etc_tab = "//*[@text='ETC']"
        self.driver.find_element(MobileBy.XPATH, etc_tab).click()
        WebDriverWait(self.driver, 15).until(
            expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
        # Accept the NFC authorization.
        self.driver.find_element(MobileBy.ID, "android:id/button1").click()
        quick_appaly_image = "//*[contains(@resource-id,'ll_online_open_card')]"
        assert (self.driver.find_element(MobileBy.XPATH, quick_appaly_image).get_attribute(
            "resourceId")) == "com.wlqq.phantom.plugin.etc:id/ll_online_open_card"
        # Tap quick card application.
        WebDriverWait(self.driver, 15).until(
            expected_conditions.element_to_be_clickable((MobileBy.XPATH, "//*[@text='快速开卡']")))
        self.driver.find_element(MobileBy.XPATH, "//*[@text='快速开卡']").click()
        # Tap back.
        self.driver.find_element(MobileBy.ID, 'com.xiwei.logistics:id/btn_back').click()
        # Wait for a webview context to appear.
        WebDriverWait(self.driver, 30).until(lambda x: len(self.driver.contexts) > 1)
        print(self.driver.contexts)

    def test_ui_selector(self):
        self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_tab') and @text='服务']").click()
        # Use UiAutomator's UiScrollable to scroll until the element is found.
        self.driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().'
                                                        'scrollable(true).instance(0)).'
                                                        'scrollIntoView(new UiSelector().textContains("去卖车").'
                                                        'instance(0));').click()
        # Explicit wait; the destination page is a webview, the assertion
        # below will need rework for webview content.
        # NOTE(review): visibility_of_element_located expects a single
        # (by, value) tuple — passing two arguments here raises TypeError.
        WebDriverWait(self.driver, 10).until(expected_conditions.visibility_of_element_located(MobileBy.ID,
                                                                                               "com.xiwei.logistics:id/tv_title"))
        assert self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'tv_title')]").\
            get_attribute('text') == '我要卖车'

    def test_etc_services(self):
        # XPath locators for the ETC services section (placeholders, unused).
        etc_service_more = "//*[@text='ETC服务']//*[@text='查看更多']"
        etc_service_apply_credit_card = "//*[@text='ETC服务']//*[contains(@text,'全国记账卡')]//*[@text='去办卡']"
        etc_service_apply_stored_card = "//*[@text='ETC服务']//*[contains(@text,'全国储值卡')]//*[@text='去办卡']"

    def test_etc_apply_card(self):
        # Locators for the card-application entry points (placeholders).
        quick_apply = "//*[contains(@resource-id,'pager_banner')][1]"
        apply_card_tab = " "

    def teardown(self):
        # self.driver.quit()
        pass
/test_appium/hcb_app/test_hcb_demo.py
from time import sleep

from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait


class TestHcbDemo:
    """Appium tests for the app's ETC flows, including webview handling."""

    def setup(self):
        # Launch the app; chromedriverExecutable enables webview automation.
        caps = {}
        caps['platformName'] = 'android'
        caps['deviceName'] = '28d6f388'
        caps["appPackage"] = "com.wlqq"
        caps["appActivity"] = ".activity.HomeActivity"
        caps["automationname"] = "uiautomator2"
        caps["chromedriverExecutable"] = "/Users/user/tool/chromedriver/2.35/chromedriver"
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(10)
        # Click through the startup dialogs (agreement, NFC, hint, ad).
        self.driver.find_element(MobileBy.ID, "com.wlqq:id/dialog_btn_right").click()
        WebDriverWait(self.driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
                                                                                          "android:id/button1")))
        self.driver.find_element(MobileBy.ID, "android:id/button1").click()
        self.driver.find_element(MobileBy.XPATH, "//*[@text='知道了']").click()
        WebDriverWait(self.driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.XPATH, "//*[contains(@resource-id,'content')]\
//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']")))
        self.driver.find_element(MobileBy.XPATH, "//*[contains(@resource-id,'content')]\
//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']").click()

    def test_etc_home(self):
        # Open the ETC section and start the quick card application.
        self.driver.find_element(MobileBy.XPATH, "//*[@text='ETC']").click()
        self.driver.find_element(MobileBy.XPATH, "//*[@text='快速办卡']").click()

    def test_webview(self):
        self.driver.find_element(MobileBy.XPATH, "//*[@text='ETC']").click()
        WebDriverWait(self.driver, 20).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
                                                                                          "android:id/button1")))
        self.driver.find_element(MobileBy.ID, "android:id/button1").click()
        WebDriverWait(self.driver, 15).until(expected_conditions.element_to_be_clickable((MobileBy.ID,
                                                                                          'com.wlqq.phantom.plugin.etc:id/tv_online_open_card')))
        self.driver.find_element(MobileBy.ID, "com.wlqq.phantom.plugin.etc:id/tv_online_open_card").click()
        print(self.driver.contexts)
        self.driver.find_element(MobileBy.ID, "com.wlqq:id/btn_back").click()
        # Print the current page_source (native XML structure at this point).
        # print(self.driver.page_source)
        # Wait for the webview context to appear.
        WebDriverWait(self.driver, 20).until(lambda x: (len(self.driver.contexts) > 1))
        # Switch into the webview container.
        self.driver.switch_to.context(self.driver.contexts[-1])
        # Print the current page_source (now an HTML structure).
        print(self.driver.page_source)
        self.driver.find_element(By.CSS_SELECTOR, ".button-container.fixed-button").click()
        # Webview toast: locate it via the div's id attribute.
        toast = self.driver.find_element(By.CSS_SELECTOR, "#goblin-toast").text
        print(toast)
        assert "未选择车牌" in toast
        print(self.driver.contexts)
        # self.driver.switch_to.context(self.driver.contexts['NATIVE_APP'])
        self.driver.find_element(MobileBy.ID, "com.wlqq:id/back_btn").click()

    def teardown(self):
        pass
/test_appium/hcb_app/test_hcb_home.py
from time import sleep
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy


class TestHcb:
    """Appium smoke tests for the HCB app home page and its ETC entry."""

    def setup(self):
        """Start a session and click through the startup dialogs."""
        caps = {}
        caps['platformName'] = 'android'
        caps['deviceName'] = '28d6f388'
        caps['appPackage'] = 'com.wlqq'
        caps['appActivity'] = 'com.wlqq.activity.HomeActivity'
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(5)
        # App launch: tap "agree" on the startup dialog.
        el1 = self.driver.find_element(MobileBy.ID, 'com.wlqq:id/dialog_btn_right')
        el1.click()
        self.driver.implicitly_wait(15)
        # Tap the home-page friendly-reminder dialog.
        el2 = self.driver.find_element(MobileBy.ID, 'com.wlqq:id/text_positive')
        el2.click()
        self.driver.implicitly_wait(20)
        # Implicit wait: keep polling for the element within the timeout; act as
        # soon as it is found, otherwise raise once the timeout expires.
        # Tap the NFC authorization dialog.
        el3 = self.driver.find_element(MobileBy.ID, 'android:id/button1')
        el3.click()

    def test_etc_home(self):
        """Tap the ETC service icon and verify its text attribute."""
        e2 = self.driver.find_element(MobileBy.XPATH, '//android.widget.ImageView[@text="ETC"]')
        e2.click()
        print(e2.get_attribute('text'))
        print("点击ETC服务完成,进入ETC插件首页")
        # print(self.driver.page_source)
        assert 'ETC' == e2.get_attribute('text')

    def test_hcb_home(self):
        """Open the 'ETC服务' entry from the home page."""
        el1 = self.driver.find_element(MobileBy.XPATH, '//android.view.View[@text="ETC服务"]')
        el1.click()

    def teardown(self):
        sleep(20)  # hard (forced) wait before quitting the session
        self.driver.quit()
/test_appium/testApiDemo.py
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy


class TestApiDemo:
    """Appium test against the ApiDemos sample app: locating a toast message."""

    def setup(self):
        """Start an ApiDemos session on the local Appium server."""
        caps = {}
        caps['platformName'] = "android"
        caps['deviceName'] = "小米5"
        caps['appPackage'] = "io.appium.android.apis"
        caps['appActivity'] = ".ApiDemos"
        self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
        self.driver.implicitly_wait(10)

    def test_toast(self):
        """Open Views > Popup Menu, trigger a toast, and assert its text."""
        self.driver.find_element(MobileBy.XPATH, "//*[@text='Views' and contains(@resource-id,'text1')]").click()
        # Scroll until "Popup Menu" is visible, then tap it.
        self.driver.find_element(MobileBy.ANDROID_UIAUTOMATOR,
                                 'new UiScrollable(new UiSelector().'
                                 'scrollable(true).instance(0)).'
                                 'scrollIntoView(new UiSelector().textContains("Popup Menu").'
                                 'instance(0));').click()
        self.driver.find_element(MobileBy.ACCESSIBILITY_ID, 'Make a Popup!').click()
        self.driver.find_element(MobileBy.XPATH, "//*[@text='Search']").click()
        # Toast location: toasts are short-lived, so capture the text into a
        # variable immediately rather than re-querying the element.
        toast = self.driver.find_element(MobileBy.XPATH, "//*[@class='android.widget.Toast']").text
        print(toast)
        assert 'Clicked' in toast
        assert 'popup menu' in toast
        assert 'API Demos:Clicked popup menu item Search' == toast

    def teardown(self):
        pass
/test_appium/test_xueqiu.py
# This sample code uses the Appium python client # pip install Appium-Python-Client # Then you can paste this into a file and simply run with Python from time import sleep from appium import webdriver from appium.webdriver.common.mobileby import MobileBy from selenium.webdriver.support import expected_conditions from selenium.webdriver.support.wait import WebDriverWait class TestXueQiu: def setup(self): caps = {} caps["platformName"] = "android" caps["deviceName"] = "test1" caps["appPackage"] = "com.xueqiu.android" caps["appActivity"] = ".view.WelcomeActivityAlias" caps["chromedriverExecutable"] = "/Users/user/tool/chromedriver/2.20/chromedriver" self.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps) WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable((MobileBy.ID, 'com.xueqiu.android:id/tv_agree'))) self.driver.find_element(MobileBy.ID, 'com.xueqiu.android:id/tv_agree').click() self.driver.implicitly_wait(10) def test_webview_context(self): self.driver.find_element(MobileBy.XPATH, "//*[@text='交易' and contains(@resource-id,'tab_name')]").click() # WebDriverWait(self.driver, 15).until(lambda x: len(self.driver.contexts) > 1) for i in range(5): print(self.driver.contexts) sleep(1) print(self.driver.page_source) self.driver.switch_to.context(self.driver.contexts[-1]) print(self.driver.contexts) print(self.driver.page_source) def teardown(self): sleep(20) self.driver.quit()
/test_appium_page_object/page/app.py
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

from test_appium_page_object.page.base_page import BasePage
from test_appium_page_object.page.main import Main


class App(BasePage):
    """Entry page object: starts/stops the HCB app and hands off to Main."""

    _appPackage = "com.wlqq"
    _appActivity = ".activity.HomeActivity"

    def start(self):
        """Create a driver session on first call, or relaunch the activity.

        Returns self so calls can be chained: ``App().start().main()``.
        """
        if self._driver is None:
            caps = {}
            caps['platformName'] = 'android'
            caps['deviceName'] = '28d6f388'
            caps["appPackage"] = self._appPackage
            caps["appActivity"] = self._appActivity
            # NOTE(review): Appium's capability is spelled "automationName";
            # this all-lowercase key is probably ignored — confirm.
            caps["automationname"] = "uiautomator2"
            caps["chromedriverExecutable"] = "/Users/user/tool/chromedriver/2.35/chromedriver"
            self._driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
            self._driver.implicitly_wait(10)
            # Dismiss the startup agreement dialog.
            self.find(MobileBy.ID, "com.wlqq:id/dialog_btn_right").click()
        else:
            # Session already exists: just bring the home activity back up.
            self._driver.start_activity(self._appPackage, self._appActivity)
        # BUGFIX: the reuse branch previously fell through without returning,
        # so App().start().main() crashed with AttributeError on None.
        return self

    def restart(self):
        pass

    def stop(self):
        pass

    # Return type hint so IDEs can follow the page-object chain.
    def main(self) -> Main:
        """Dismiss the remaining startup dialogs and return the Main page object."""
        # todo: wait for the main page itself instead of dialog-by-dialog
        WebDriverWait(self._driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.ID, "android:id/button1")))
        self.find(MobileBy.ID, "android:id/button1").click()
        self.find(MobileBy.XPATH, "//*[@text='知道了']").click()
        # Wait for the floating ImageView over the content pane, then tap it away.
        WebDriverWait(self._driver, 30).until(expected_conditions.element_to_be_clickable((MobileBy.XPATH, "//*[contains(@resource-id,'content')]//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']")))
        self.find(MobileBy.XPATH, "//*[contains(@resource-id,'content')]//*[@class ='android.widget.FrameLayout']//*[@class='android.widget.ImageView']").click()
        # WebDriverWait(self._driver, 30).until(lambda x: "ETC" in self._driver.page_source)
        # (alternative: wait for a home-page element to confirm loading finished)
        return Main(self._driver)
/test_appium_page_object/page/apply_etc_card/apply_credit_card.py
from appium.webdriver.common.mobileby import MobileBy

from test_appium_page_object.page.base_page import BasePage


class ApplyCreditCard(BasePage):
    """Page object for the ETC card-application screen."""

    # Locator tuples for the "open card online" button and the NFC back button.
    _name_apply_card_element = (MobileBy.ID, "com.wlqq.phantom.plugin.etc:id/tv_online_open_card")
    _name_nfc_element = (MobileBy.ID, "com.wlqq:id/btn_back")

    def apply_credit_card(self):
        """Tap 'open card online', then back out of the NFC screen."""
        for locator in (self._name_apply_card_element, self._name_nfc_element):
            self.find(locator).click()
        return self

    def goto_faq(self):
        pass

    def goto_bind_card(self):
        pass

    def goto_obu(self):
        pass
/test_appium_page_object/page/base_page.py
import yaml
from appium.webdriver import WebElement
from appium.webdriver.webdriver import WebDriver
import logging


class BasePage:
    """Common base for all page objects: resilient element lookup and
    YAML-driven step execution."""

    logging.basicConfig(level=logging.INFO)  # use logging instead of print

    _driver: WebDriver
    # Locator tuples of known popups/ads to dismiss when a lookup fails.
    _black_list = []
    # Maximum number of consecutive lookup failures before giving up.
    _error_max = 5
    _error_count = 0

    def __init__(self, driver: WebDriver = None):
        self._driver = driver

    # todo: when ads / unexpected popups appear, handle them here; a decorator
    # around actions would be the usual approach.
    def find(self, locator, value: str = None):
        """Find an element, dismissing known black-listed popups on failure.

        `locator` may be a (by, value) tuple or a `by` string paired with `value`.
        Raises the original lookup exception once retries are exhausted or no
        black-listed popup was found.
        """
        # BUGFIX: logging.info(locator, value) used the locator tuple as the
        # format string (which silently logs a formatting error); pass a real
        # format string with lazy %-args instead.
        logging.info("find: locator=%s value=%s", locator, value)
        try:
            element = self._driver.find_element(*locator) if isinstance(locator, tuple) \
                else self._driver.find_element(locator, value)
            # Lookup succeeded: reset the consecutive-failure counter.
            self._error_count = 0
            return element
        except Exception as e:
            # Too many consecutive failures: stop handling and propagate.
            if self._error_count > self._error_max:
                raise e
            # BUGFIX: the original incremented _error_max here, which both left
            # _error_count at 0 (infinite retries) and grew the limit forever.
            self._error_count += 1
            # Try to dismiss any black-listed popup, then retry recursively.
            for black_locator in self._black_list:
                elements = self._driver.find_elements(*black_locator)
                if len(elements) > 0:
                    elements[0].click()
                    return self.find(locator, value)
            # No black-listed popup found either: re-raise the original error.
            # (logging.warn is deprecated in favor of logging.warning.)
            logging.warning("black list no found")
            raise e

    def steps(self, path):
        """Execute UI steps described in a YAML file.

        Each step dict may contain: `by`/`locator` to find an element, and an
        `action` (find/click/text/attribute/send/input) applied to the most
        recently found element.
        """
        with open(path) as f:
            steps: list[dict] = yaml.safe_load(f)
        element: WebElement = None
        for step in steps:
            logging.info(step)
            if "by" in step.keys():
                element = self.find(step["by"], step["locator"])
            if "action" in step.keys():
                action = step["action"]
                if action == "find":
                    pass
                elif action == "click":
                    element.click()
                elif action == "text":
                    # BUGFIX: WebElement.text is a property; calling it as
                    # element.text() raised TypeError.
                    _ = element.text
                elif action == "attribute":
                    element.get_attribute(step["value"])
                elif action in ["send", "input"]:
                    content: str = step["value"]
                    # NOTE(review): _params is never defined on BasePage;
                    # subclasses presumably set it before calling steps().
                    # Default to {} so plain "send" steps still work — confirm.
                    params = getattr(self, "_params", {})
                    for key in params.keys():
                        content = content.replace("{%s}" % key, params[key])
                    element.send_keys(content)
/test_appium_page_object/page/main.py
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait

from test_appium_page_object.page.apply_etc_card.apply_credit_card import ApplyCreditCard
from test_appium_page_object.page.base_page import BasePage


class Main(BasePage):
    """Page object for the app home screen."""

    def goto_etc_home(self):
        """Open the ETC plugin and land on the card-application page."""
        etc_entry = (MobileBy.XPATH, "//*[@text='ETC']")
        system_ok = (MobileBy.ID, "android:id/button1")
        self.find(*etc_entry).click()
        # The system dialog needs to be clickable before we can accept it.
        WebDriverWait(self._driver, 20).until(
            expected_conditions.element_to_be_clickable(system_ok))
        self.find(*system_ok).click()
        return ApplyCreditCard(self._driver)

    def goto_etc_services_more(self):
        pass

    def goto_profile(self):
        pass

    def goto_message(self):
        pass
/test_appium_page_object/testcase/test_apply_credit_card.py
from test_appium_page_object.page.app import App


class TestApplyCreditCard:
    """End-to-end: launch the app and run the ETC card-application flow."""

    def setup(self):
        app = App()
        self.main = app.start().main()

    def test_apply_credit_card(self):
        etc_page = self.main.goto_etc_home()
        etc_page.apply_credit_card()
/test_yaml/test_yaml.py
import pytest
import yaml


def _read_cases(path="testyaml.yaml"):
    """Load the (a, b) parameter pairs for test_yaml_read.

    BUGFIX: the original passed a bare open() into the parametrize decorator,
    leaking the file handle; a context manager closes it deterministically.
    """
    with open(path, encoding="utf-8") as f:
        return yaml.safe_load(f)


class TestYaml:
    def test_yaml(self):
        """safe_load parses a plain YAML sequence into a Python list."""
        # BUGFIX: yaml.load() without an explicit Loader is deprecated and
        # unsafe on untrusted input; safe_load is equivalent for plain data.
        print(yaml.safe_load("""
        - Hesperiidae
        - Papilionidae
        - Apatelodidae
        - Epiplemidae
        """))

    @pytest.mark.parametrize("a,b", _read_cases())
    def test_yaml_read(self, a, b):
        assert a + b == 10
/unit/test_unit.py
import unittest


class TestSum(unittest.TestCase):
    """Minimal unittest example: checks integer addition."""

    def test_sum(self):
        self.assertEqual(3, 1 + 2)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
aharley/neural_3d_mapping
refs/heads/master
{"/model_carla_static.py": ["/nets/emb3dnet.py", "/nets/viewnet.py", "/nets/emb2dnet.py"], "/main.py": ["/model_carla_static.py", "/model_carla_ego.py", "/model_carla_det.py"], "/model_carla_det.py": ["/nets/detnet.py"], "/model_carla_ego.py": ["/nets/egonet.py"]}
└── ├── archs │ ├── bottle2D.py │ ├── encoder2d.py │ ├── encoder3d.py │ └── pixelshuffle3d.py ├── backend │ └── saverloader.py ├── exp_base.py ├── exp_carla_det.py ├── exp_carla_ego.py ├── exp_carla_static.py ├── hyperparams.py ├── main.py ├── model_carla_det.py ├── model_carla_ego.py ├── model_carla_static.py ├── nets │ ├── detnet.py │ ├── egonet.py │ ├── emb2dnet.py │ ├── emb3dnet.py │ ├── flownet.py │ └── viewnet.py └── pretrained_nets_carla.py
/archs/bottle2D.py
import time

import torch
import torch.nn as nn
import torch.nn.functional as F

# import hyperparams as hyp
# from utils_basic import *


class Bottle2D(nn.Module):
    """Convolutional bottleneck: a stack of stride-2 convs whose output is
    flattened to (B, -1).

    NOTE(review): `linear_layers` is built but never applied in forward()
    (the call was commented out in the original) — confirm whether the MLP
    head was meant to be used.
    """

    def __init__(self, in_channel, pred_dim, chans=64):
        super(Bottle2D, self).__init__()
        conv2d = []
        # Channel schedule of the downsampling stack (each layer halves H/W).
        self.out_chans = [chans, 2*chans, 4*chans]
        for i in range(len(self.out_chans)):
            in_dim = in_channel if i == 0 else self.out_chans[i-1]
            out_dim = self.out_chans[i]
            conv2d.append(nn.Sequential(
                nn.Conv2d(in_channels=in_dim, out_channels=out_dim,
                          kernel_size=4, stride=2, padding=0),
                nn.LeakyReLU(),
                nn.BatchNorm2d(num_features=out_dim),
            ))
        self.conv2d = nn.ModuleList(conv2d)
        hidden_dim = 1024
        # NOTE(review): in_features assumes a trailing 2x2x2 volume; for 2-D
        # inputs the flattened size depends on input resolution — confirm.
        self.linear_layers = nn.Sequential(
            nn.Linear(self.out_chans[-1]*2*2*2, hidden_dim),
            nn.LeakyReLU(),
            nn.Linear(hidden_dim, pred_dim),
        )

    def forward(self, feat):
        """Run the conv stack and flatten the result to (B, -1)."""
        B, C, Z, X = list(feat.shape)
        for conv2d_layer in self.conv2d:
            feat = conv2d_layer(feat)
        return feat.reshape(B, -1)


class ResNetBottle2D(nn.Module):
    """Residual bottleneck: alternating stride-2 downsamplers and residual
    blocks, followed by a small MLP head producing `pred_dim` outputs."""

    def __init__(self, in_channel, pred_dim, chans=64):
        super(ResNetBottle2D, self).__init__()
        # First layer - downsampling from in_channel to chans.
        in_dim, out_dim, ksize, stride, padding = in_channel, chans, 4, 2, 1
        self.down_sampler0 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        in_dim, out_dim, ksize, stride, padding = chans, chans, 3, 1, 1
        self.res_block1 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.res_block2 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.res_block3 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.res_block4 = self.generate_block(in_dim, out_dim, ksize, stride, padding)
        self.down_sampler1 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.down_sampler2 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.down_sampler3 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.down_sampler4 = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
        )
        self.lrelu = nn.LeakyReLU()
        # # final 1x1x1 conv to get our desired pred_dim
        # self.final_feature = nn.Conv2d(in_channels=chans, out_channels=pred_dim, kernel_size=1, stride=1, padding=0)
        # NOTE(review): in_features assumes the 5 downsamplers leave a spatial
        # volume of 2x2x2 elements per channel — confirm for the input sizes used.
        self.linear_layers = nn.Sequential(
            nn.Linear(out_dim*2*2*2, 512),
            nn.LeakyReLU(),
            # BUGFIX: the second Linear declared in_features=64 while the
            # first layer outputs 512 features, so every forward pass crashed
            # with a shape mismatch.
            nn.Linear(512, pred_dim),
        )

    def generate_block(self, in_dim, out_dim, ksize, stride, padding):
        """Build a 1x1 -> 3x3 -> 1x1 residual branch.

        NOTE(review): the ksize/stride/padding arguments are ignored (the
        kernel sizes are hard-coded) — kept for signature compatibility.
        """
        block = nn.Sequential(
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=out_dim),
            nn.LeakyReLU(),
            nn.Conv2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, stride=1),
            nn.BatchNorm2d(num_features=out_dim),
        )
        return block

    def forward(self, feat):
        """Downsample 5x with residual blocks in between, then apply the MLP head."""
        # BUGFIX: the original unpacked five dims (B, C, Z, Y, X), but every
        # layer here is 2-D (Conv2d requires 4-D input), so any valid input
        # raised a ValueError at the unpack.
        B, C, Z, X = list(feat.shape)
        feat = self.down_sampler0(feat)
        feat = self.lrelu(feat + self.res_block1(feat))
        feat = self.down_sampler1(feat)
        feat = self.lrelu(feat + self.res_block2(feat))
        feat = self.down_sampler2(feat)
        feat = self.lrelu(feat + self.res_block3(feat))
        feat = self.down_sampler3(feat)
        feat = self.lrelu(feat + self.res_block4(feat))
        feat = self.down_sampler4(feat)
        feat = feat.reshape(B, -1)
        feat = self.linear_layers(feat)
        return feat
/archs/encoder2d.py
import torch
import torch.nn as nn
# import hyperparams as hyp
# from utils_basic import *


class Skipnet2d(nn.Module):
    """2-D U-Net-style encoder/decoder with concatenation skip connections."""

    def __init__(self, in_chans, mid_chans=64, out_chans=1):
        super(Skipnet2d, self).__init__()
        down_layers = []
        up_layers = []
        up_norms = []

        # Encoder: three stride-2 convs, doubling the channel count each time.
        self.down_in_dims = [in_chans, mid_chans, 2*mid_chans]
        self.down_out_dims = [mid_chans, 2*mid_chans, 4*mid_chans]
        self.down_ksizes = [3, 3, 3]
        self.down_strides = [2, 2, 2]
        padding = 1
        down_specs = zip(self.down_in_dims, self.down_out_dims,
                         self.down_ksizes, self.down_strides)
        for c_in, c_out, k, s in down_specs:
            down_layers.append(nn.Sequential(
                nn.Conv2d(in_channels=c_in, out_channels=c_out,
                          kernel_size=k, stride=s, padding=padding),
                nn.LeakyReLU(),
                nn.BatchNorm2d(num_features=c_out),
            ))
        self.conv2d = nn.ModuleList(down_layers)

        # Decoder: transpose convs; the in/bn dims account for the skip
        # features concatenated after each upsampling step.
        self.up_in_dims = [4*mid_chans, 6*mid_chans]
        self.up_bn_dims = [6*mid_chans, 3*mid_chans]
        self.up_out_dims = [4*mid_chans, 2*mid_chans]
        self.up_ksizes = [4, 4]
        self.up_strides = [2, 2]
        padding = 1  # Note: this only holds for ksize=4 and stride=2!
        print('up dims: ', self.up_out_dims)
        up_specs = zip(self.up_in_dims, self.up_bn_dims, self.up_out_dims,
                       self.up_ksizes, self.up_strides)
        for c_in, c_bn, c_out, k, s in up_specs:
            up_layers.append(nn.Sequential(
                nn.ConvTranspose2d(in_channels=c_in, out_channels=c_out,
                                   kernel_size=k, stride=s, padding=padding),
                nn.LeakyReLU(),
            ))
            up_norms.append(nn.BatchNorm2d(num_features=c_bn))

        # Final 1x1 conv projects down to the requested channel count.
        self.final_feature = nn.Conv2d(in_channels=3*mid_chans, out_channels=out_chans,
                                       kernel_size=1, stride=1, padding=0)
        self.conv2d_transpose = nn.ModuleList(up_layers)
        self.up_bn = nn.ModuleList(up_norms)

    def forward(self, inputs):
        """Encode, then decode with skip concatenation; returns the feature map."""
        skip_stack = []
        feat = inputs
        for down in self.conv2d:
            feat = down(feat)
            skip_stack.append(feat)
        # The innermost activation feeds the decoder; it is not a skip.
        skip_stack.pop()
        for up, norm in zip(self.conv2d_transpose, self.up_bn):
            upsampled = up(feat)
            # Skip connection by concatenation along the channel axis.
            feat = norm(torch.cat([upsampled, skip_stack.pop()], dim=1))
        return self.final_feature(feat)


if __name__ == "__main__":
    net = Skipnet2d(in_chans=4, mid_chans=32, out_chans=3)
    print(net.named_parameters)
    inputs = torch.rand(2, 4, 128, 384)
    out = net(inputs)
    print(out.size())
/archs/encoder3d.py
import torch
import torch.nn as nn
import time
import torch.nn.functional as F
import archs.pixelshuffle3d  # NOTE(review): imported but unused in this module


class Skipnet3d(nn.Module):
    """3-D U-Net-style encoder/decoder with concatenation skip connections."""

    def __init__(self, in_dim, out_dim, chans=64):
        super(Skipnet3d, self).__init__()
        conv3d = []
        up_bn = []  # batch norm for deconv
        conv3d_transpose = []

        # Encoder schedule. NOTE(review): down_in_dims has 3 entries while the
        # other lists have 4, so zip() truncates to 3 encoder stages and the
        # 8*chans entry is never used — confirm this is intended.
        self.down_in_dims = [in_dim, chans, 2*chans]#, 4*chans]
        self.down_out_dims = [chans, 2*chans, 4*chans, 8*chans]
        self.down_ksizes = [4, 4, 4, 4]
        self.down_strides = [2, 2, 2, 2]
        padding = 1
        # print('down dims: ', self.down_out_dims)
        for i, (in_chan, out_chan, ksize, stride) in enumerate(zip(self.down_in_dims, self.down_out_dims, self.down_ksizes, self.down_strides)):
            conv3d.append(nn.Sequential(
                # Replication padding is applied explicitly; the conv itself
                # uses padding=0.
                nn.ReplicationPad3d(padding),
                nn.Conv3d(in_channels=in_chan, out_channels=out_chan, kernel_size=ksize, stride=stride, padding=0),
                # nn.Conv3d(in_channels=in_chan, out_channels=out_chan, kernel_size=ksize, stride=stride, padding=padding),
                nn.LeakyReLU(),
                nn.BatchNorm3d(num_features=out_chan),
            ))
        self.conv3d = nn.ModuleList(conv3d)

        # Decoder schedule; the in/bn dims account for the concatenated skips.
        self.up_in_dims = [4*chans, 6*chans]
        self.up_out_dims = [4*chans, 4*chans]
        self.up_bn_dims = [6*chans, 5*chans]
        self.up_ksizes = [4, 4]
        self.up_strides = [2, 2]
        padding = 1
        # print('up dims: ', self.up_out_dims)
        for i, (in_chan, bn_dim, out_chan, ksize, stride) in enumerate(zip(self.up_in_dims, self.up_bn_dims, self.up_out_dims, self.up_ksizes, self.up_strides)):
            conv3d_transpose.append(nn.Sequential(
                nn.ConvTranspose3d(in_channels=in_chan, out_channels=out_chan, kernel_size=ksize, stride=stride, padding=padding),
                nn.LeakyReLU(),
            ))
            up_bn.append(nn.BatchNorm3d(num_features=bn_dim))
        self.conv3d_transpose = nn.ModuleList(conv3d_transpose)
        self.up_bn = nn.ModuleList(up_bn)
        # final 1x1x1 conv to get our desired out_dim
        self.final_feature = nn.Conv3d(in_channels=self.up_bn_dims[-1], out_channels=out_dim, kernel_size=1, stride=1, padding=0)

    def forward(self, inputs):
        """Encode, then decode with skip concatenation; returns the feature map."""
        feat = inputs
        skipcons = []
        for conv3d_layer in self.conv3d:
            feat = conv3d_layer(feat)
            skipcons.append(feat)
        skipcons.pop()  # we don't want the innermost layer as skipcon
        for i, (conv3d_transpose_layer, bn_layer) in enumerate(zip(self.conv3d_transpose, self.up_bn)):
            # print('feat before up', feat.shape)
            feat = conv3d_transpose_layer(feat)
            feat = torch.cat([feat, skipcons.pop()], dim=1)  # skip connection by concatenation
            # print('feat before bn', feat.shape)
            feat = bn_layer(feat)
        feat = self.final_feature(feat)
        return feat


class Res3dBlock(nn.Module):
    """3-D residual block: two 3x3x3 convs plus a (possibly projected) skip."""

    def __init__(self, in_planes, out_planes, padding=1):
        super(Res3dBlock, self).__init__()
        self.res_branch = nn.Sequential(
            nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=1, padding=padding),
            nn.BatchNorm3d(out_planes),
            nn.ReLU(True),
            nn.Conv3d(out_planes, out_planes, kernel_size=3, stride=1, padding=padding),
            nn.BatchNorm3d(out_planes)
        )
        assert(padding==1 or padding==0)
        self.padding = padding
        if in_planes == out_planes:
            # Same width: identity skip.
            self.skip_con = nn.Sequential()
        else:
            # Width change: 1x1x1 projection on the skip path.
            self.skip_con = nn.Sequential(
                nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm3d(out_planes)
            )

    def forward(self, x):
        res = self.res_branch(x)
        # print('res', res.shape)
        skip = self.skip_con(x)
        if self.padding==0:
            # the data has shrunk a bit: two unpadded 3x3x3 convs trim 2 voxels
            # from each side, so crop the skip path to match.
            skip = skip[:,:,2:-2,2:-2,2:-2]
        # print('skip', skip.shape)
        return F.relu(res + skip, True)


class Conv3dBlock(nn.Module):
    """Unpadded 3x3x3 conv + batch norm + ReLU."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(Conv3dBlock, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride, padding=0),
            nn.BatchNorm3d(out_planes),
            nn.ReLU(True),
        )

    def forward(self, x):
        return self.conv(x)


class Pool3dBlock(nn.Module):
    """Max pooling with equal kernel and stride (non-overlapping windows)."""

    def __init__(self, pool_size):
        super(Pool3dBlock, self).__init__()
        self.pool_size = pool_size

    def forward(self, x):
        return F.max_pool3d(x, kernel_size=self.pool_size, stride=self.pool_size)


class Deconv3dBlock(nn.Module):
    """2x upsampling transpose conv + LeakyReLU."""

    def __init__(self, in_planes, out_planes):
        super(Deconv3dBlock, self).__init__()
        self.deconv = nn.Sequential(
            nn.ConvTranspose3d(in_planes, out_planes, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(),
        )

    def forward(self, x):
        return self.deconv(x)


class Resnet3d(nn.Module):
    """Residual 3-D encoder: two /2 pools and one x2 deconv, so the output
    is at half the input resolution, projected to out_dim channels."""

    def __init__(self, in_dim, out_dim, chans=32):
        super().__init__()
        self.encoder_layer0 = Res3dBlock(in_dim, chans)
        self.encoder_layer1 = Pool3dBlock(2)
        self.encoder_layer2 = Res3dBlock(chans, chans)
        self.encoder_layer3 = Res3dBlock(chans, chans)
        self.encoder_layer4 = Res3dBlock(chans, chans)
        self.encoder_layer5 = Pool3dBlock(2)
        self.encoder_layer6 = Res3dBlock(chans, chans)
        self.encoder_layer7 = Res3dBlock(chans, chans)
        self.encoder_layer8 = Res3dBlock(chans, chans)
        self.encoder_layer9 = Deconv3dBlock(chans, chans)
        self.final_layer = nn.Conv3d(in_channels=chans, out_channels=out_dim, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        x = self.encoder_layer0(x)
        x = self.encoder_layer1(x)
        x = self.encoder_layer2(x)
        x = self.encoder_layer3(x)
        x = self.encoder_layer4(x)
        x = self.encoder_layer5(x)
        x = self.encoder_layer6(x)
        x = self.encoder_layer7(x)
        x = self.encoder_layer8(x)
        x = self.encoder_layer9(x)
        x = self.final_layer(x)
        return x
/archs/pixelshuffle3d.py
'''
reference: http://www.multisilicon.com/blog/a25332339.html
'''
import torch.nn as nn


class PixelShuffle3d(nn.Module):
    '''
    This class is a 3d version of pixelshuffle: it rearranges a
    (B, C*r^3, D, H, W) tensor into (B, C, D*r, H*r, W*r).
    '''

    def __init__(self, scale):
        '''
        :param scale: upsample scale
        '''
        super().__init__()
        self.scale = scale

    def forward(self, input):
        b, c, d, h, w = input.size()
        r = self.scale
        c_out = c // r ** 3
        # Split the channel axis into (c_out, r, r, r), then interleave each
        # scale factor with its spatial axis before collapsing.
        expanded = input.contiguous().view(b, c_out, r, r, r, d, h, w)
        shuffled = expanded.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
        return shuffled.view(b, c_out, d * r, h * r, w * r)
/backend/saverloader.py
import os
import pathlib

import numpy as np
import torch

import hyperparams as hyp


def load_total(model, optimizer):
    """Restore the full model+optimizer checkpoint named by hyp.total_init.

    Returns the iteration to resume from (0 if nothing was loaded).
    """
    start_iter = 0
    if hyp.total_init:
        print("TOTAL INIT")
        print(hyp.total_init)
        start_iter = load(hyp.total_init, model, optimizer)
        if start_iter:
            print("loaded full model. resuming from iter %08d" % start_iter)
        else:
            print("could not find a full model. starting from scratch")
    return start_iter


def load_weights(model, optimizer):
    """Load either a full checkpoint (hyp.total_init) or per-part inits.

    Returns the iteration to resume from (0 when starting from scratch or
    when hyp.reset_iter is set).
    """
    if hyp.total_init:
        print("TOTAL INIT")
        print(hyp.total_init)
        start_iter = load(hyp.total_init, model, optimizer)
        if start_iter:
            print("loaded full model. resuming from iter %08d" % start_iter)
        else:
            print("could not find a full model. starting from scratch")
    else:
        start_iter = 0
        # Map each loadable part name to its init checkpoint (if any).
        inits = {"feat2dnet": hyp.feat2d_init,
                 "feat3dnet": hyp.feat3d_init,
                 "viewnet": hyp.view_init,
                 "detnet": hyp.det_init,
                 "flownet": hyp.flow_init,
                 "egonet": hyp.ego_init,
                 "occnet": hyp.occ_init,
                 }
        for part, init in list(inits.items()):
            if init:
                # BUGFIX: the original if/elif dispatch had no branch for
                # viewnet/detnet/egonet even though they appear in `inits`,
                # so setting e.g. hyp.det_init tripped assert(False).
                # NOTE(review): submodule attribute names are assumed to match
                # the part names (model.detnet, model.egonet, ...) — confirm
                # against the model classes.
                if hasattr(model, part):
                    model_part = getattr(model, part)
                else:
                    assert(False)  # unknown part name
                if isinstance(model_part, list):
                    for mp in model_part:
                        loaded_step = load_part([mp], part, init)
                else:
                    loaded_step = load_part(model_part, part, init)
                if loaded_step:
                    print("loaded %s at iter %08d" % (init, loaded_step))
                else:
                    print("could not find a checkpoint for %s" % init)
    if hyp.reset_iter:
        start_iter = 0
    return start_iter


def save(model, checkpoint_dir, step, optimizer, keep_latest=3):
    """Save model+optimizer state at `step`, pruning old checkpoints.

    Only the `keep_latest` most recent checkpoint files are retained.
    """
    model_name = "model-%08d.pth" % (step)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Delete the oldest checkpoints so that keep_latest remain after saving.
    prev_chkpts = list(pathlib.Path(checkpoint_dir).glob('model-*'))
    prev_chkpts.sort(key=lambda p: p.stat().st_mtime, reverse=True)
    if len(prev_chkpts) > keep_latest-1:
        for f in prev_chkpts[keep_latest-1:]:
            f.unlink()
    path = os.path.join(checkpoint_dir, model_name)
    torch.save({
        'step': step,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict()
    }, path)
    print("Saved a checkpoint: %s" % (path))


def load(model_name, model, optimizer):
    """Load the newest full checkpoint under saved_checkpoints/<model_name>.

    Returns the checkpoint's step, or 0 if no checkpoint was found.
    """
    print("reading full checkpoint...")
    checkpoint_dir = os.path.join("saved_checkpoints/", model_name)
    step = 0
    if not os.path.exists(checkpoint_dir):
        print("...ain't no full checkpoint here!")
    else:
        ckpt_names = os.listdir(checkpoint_dir)
        # Checkpoint files are named model-%08d.pth; pull out the step numbers.
        steps = [int((i.split('-')[1]).split('.')[0]) for i in ckpt_names]
        if len(ckpt_names) > 0:
            step = max(steps)
            model_name = 'model-%08d.pth' % (step)
            path = os.path.join(checkpoint_dir, model_name)
            print("...found checkpoint %s" % (path))
            checkpoint = torch.load(path)
            model.load_state_dict(checkpoint['model_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("...ain't no full checkpoint here!")
    return step


def load_part(model, part, init):
    """Copy the parameters of one sub-network (`part`) out of a full checkpoint.

    Checkpoint keys are expected to be prefixed "<part>."; only matching
    entries are copied into `model`. Returns the checkpoint step, or 0.
    """
    print("reading %s checkpoint..." % part)
    init_dir = os.path.join("saved_checkpoints", init)
    print(init_dir)
    step = 0
    if not os.path.exists(init_dir):
        print("...ain't no %s checkpoint here!" % (part))
    else:
        ckpt_names = os.listdir(init_dir)
        steps = [int((i.split('-')[1]).split('.')[0]) for i in ckpt_names]
        if len(ckpt_names) > 0:
            # Pick the newest checkpoint by step number.
            step = max(steps)
            ind = np.argmax(steps)
            model_name = ckpt_names[ind]
            path = os.path.join(init_dir, model_name)
            print("...found checkpoint %s" % (path))
            checkpoint = torch.load(path)
            model_state_dict = model.state_dict()
            for load_param_name, param in checkpoint['model_state_dict'].items():
                # Strip the "<part>." prefix; skip entries from other parts.
                model_param_name = load_param_name[len(part)+1:]
                if part+"."+model_param_name != load_param_name:
                    continue
                if model_param_name in model_state_dict.keys():
                    model_state_dict[model_param_name].copy_(param.data)
                else:
                    print('warning: %s is not in the state dict of the current model' % model_param_name)
        else:
            print("...ain't no %s checkpoint here!" % (part))
    return step
/exp_base.py
# exp_base.py: shared experiment configuration.
# `groups` maps a short tag to a list of "var = value" strings; each
# exp_carla_* file picks a set of groups and execs the strings into the
# hyperparams namespace. `exps` maps an experiment name to its group tags.
import pretrained_nets_carla as pret_carla

exps = {}
groups = {}

############## training settings ##############

groups['train_feat3d'] = [
    'do_feat3d = True',
    'feat3d_dim = 32',
    # 'feat3d_smooth_coeff = 0.01',
]
groups['train_det'] = [
    'do_det = True',
    'det_prob_coeff = 1.0',
    'det_reg_coeff = 1.0',
]

############## dataset settings ##############

H = 128
W = 384

groups['seqlen1'] = [
    'trainset_seqlen = 1',
    'valset_seqlen = 1',
]
# metric bounds (meters) of the memory volume, per axis
groups['8-4-8_bounds'] = [
    'XMIN = -8.0', # right (neg is left)
    'XMAX = 8.0', # right
    'YMIN = -4.0', # down (neg is up)
    'YMAX = 4.0', # down
    'ZMIN = -8.0', # forward (neg is backward)
    'ZMAX = 8.0', # forward
]
groups['16-4-16_bounds'] = [
    'XMIN = -16.0', # right (neg is left)
    'XMAX = 16.0', # right
    'YMIN = -4.0', # down (neg is up)
    'YMAX = 4.0', # down
    'ZMIN = -16.0', # forward (neg is backward)
    'ZMAX = 16.0', # forward
]
groups['16-8-16_bounds'] = [
    'XMIN = -16.0', # right (neg is left)
    'XMAX = 16.0', # right
    'YMIN = -8.0', # down (neg is up)
    'YMAX = 8.0', # down
    'ZMIN = -16.0', # forward (neg is backward)
    'ZMAX = 16.0', # forward
]

dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"

groups['carla_multiview_10_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "mags7i3ten"',
    'trainset_format = "multiview"',
    # 'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_multiview_train_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "mags7i3t"',
    'trainset_format = "multiview"',
    # 'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_multiview_test_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'testset = "mags7i3v"',
    'testset_format = "multiview"',
    # 'testset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_multiview_train_val_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "mags7i3t"',
    'trainset_format = "multiview"',
    # 'trainset_seqlen = %d' % S,
    'valset = "mags7i3v"',
    'valset_format = "multiview"',
    # 'valset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]

############## other settings ##############

groups['include_summs'] = [
    'do_include_summs = True',
]
groups['decay_lr'] = ['do_decay_lr = True']
groups['clip_grad'] = ['do_clip_grad = True']
# (commented-out alternates in the original: quick_snap=500,
# quicker_snap=50, quickest_snap=5)
groups['snap500'] = ['snap_freq = 500']
groups['snap1k'] = ['snap_freq = 1000']
groups['snap5k'] = ['snap_freq = 5000']
groups['no_shuf'] = [
    'shuffle_train = False',
    'shuffle_val = False',
    'shuffle_test = False',
]
groups['time_flip'] = ['do_time_flip = True']
groups['no_backprop'] = [
    'backprop_on_train = False',
    'backprop_on_val = False',
    'backprop_on_test = False',
]
groups['train_on_trainval'] = [
    'backprop_on_train = True',
    'backprop_on_val = True',
    'backprop_on_test = False',
]
# batch sizes (B* = train, vB* = val)
groups['B1'] = ['trainset_batch_size = 1']
groups['B2'] = ['trainset_batch_size = 2']
groups['B4'] = ['trainset_batch_size = 4']
groups['B6'] = ['trainset_batch_size = 6']
groups['B8'] = ['trainset_batch_size = 8']
groups['B10'] = ['trainset_batch_size = 10']
groups['B12'] = ['trainset_batch_size = 12']
groups['B16'] = ['trainset_batch_size = 16']
groups['B24'] = ['trainset_batch_size = 24']
groups['B32'] = ['trainset_batch_size = 32']
groups['B64'] = ['trainset_batch_size = 64']
groups['B128'] = ['trainset_batch_size = 128']
groups['vB1'] = ['valset_batch_size = 1']
groups['vB2'] = ['valset_batch_size = 2']
groups['vB4'] = ['valset_batch_size = 4']
groups['vB8'] = ['valset_batch_size = 8']
# learning rates
groups['lr0'] = ['lr = 0.0']
groups['lr1'] = ['lr = 1e-1']
groups['lr2'] = ['lr = 1e-2']
groups['lr3'] = ['lr = 1e-3']
groups['2lr4'] = ['lr = 2e-4']
groups['5lr4'] = ['lr = 5e-4']
groups['lr4'] = ['lr = 1e-4']
groups['lr5'] = ['lr = 1e-5']
groups['lr6'] = ['lr = 1e-6']
groups['lr7'] = ['lr = 1e-7']
groups['lr8'] = ['lr = 1e-8']
groups['lr9'] = ['lr = 1e-9']
groups['lr12'] = ['lr = 1e-12']
# iteration counts
groups['1_iters'] = ['max_iters = 1']
groups['2_iters'] = ['max_iters = 2']
groups['3_iters'] = ['max_iters = 3']
groups['5_iters'] = ['max_iters = 5']
groups['6_iters'] = ['max_iters = 6']
groups['9_iters'] = ['max_iters = 9']
groups['21_iters'] = ['max_iters = 21']
groups['7_iters'] = ['max_iters = 7']
groups['10_iters'] = ['max_iters = 10']
groups['15_iters'] = ['max_iters = 15']
groups['20_iters'] = ['max_iters = 20']
groups['25_iters'] = ['max_iters = 25']
groups['30_iters'] = ['max_iters = 30']
groups['50_iters'] = ['max_iters = 50']
groups['100_iters'] = ['max_iters = 100']
groups['150_iters'] = ['max_iters = 150']
groups['200_iters'] = ['max_iters = 200']
groups['250_iters'] = ['max_iters = 250']
groups['300_iters'] = ['max_iters = 300']
groups['397_iters'] = ['max_iters = 397']
groups['400_iters'] = ['max_iters = 400']
groups['447_iters'] = ['max_iters = 447']
groups['500_iters'] = ['max_iters = 500']
groups['850_iters'] = ['max_iters = 850']
groups['1000_iters'] = ['max_iters = 1000']
groups['2000_iters'] = ['max_iters = 2000']
groups['2445_iters'] = ['max_iters = 2445']
groups['3000_iters'] = ['max_iters = 3000']
groups['4000_iters'] = ['max_iters = 4000']
groups['4433_iters'] = ['max_iters = 4433']
groups['5000_iters'] = ['max_iters = 5000']
groups['10000_iters'] = ['max_iters = 10000']
groups['1k_iters'] = ['max_iters = 1000']
groups['2k_iters'] = ['max_iters = 2000']
groups['5k_iters'] = ['max_iters = 5000']
groups['10k_iters'] = ['max_iters = 10000']
groups['20k_iters'] = ['max_iters = 20000']
groups['30k_iters'] = ['max_iters = 30000']
groups['40k_iters'] = ['max_iters = 40000']
groups['50k_iters'] = ['max_iters = 50000']
groups['60k_iters'] = ['max_iters = 60000']
groups['80k_iters'] = ['max_iters = 80000']
groups['100k_iters'] = ['max_iters = 100000']
groups['100k10_iters'] = ['max_iters = 100010']
groups['200k_iters'] = ['max_iters = 200000']
groups['300k_iters'] = ['max_iters = 300000']
groups['400k_iters'] = ['max_iters = 400000']
groups['500k_iters'] = ['max_iters = 500000']
groups['resume'] = ['do_resume = True']
groups['reset_iter'] = ['reset_iter = True']
# logging frequencies (applied to train/val/test together)
groups['log1'] = [
    'log_freq_train = 1',
    'log_freq_val = 1',
    'log_freq_test = 1',
]
groups['log5'] = [
    'log_freq_train = 5',
    'log_freq_val = 5',
    'log_freq_test = 5',
]
groups['log10'] = [
    'log_freq_train = 10',
    'log_freq_val = 10',
    'log_freq_test = 10',
]
groups['log50'] = [
    'log_freq_train = 50',
    'log_freq_val = 50',
    'log_freq_test = 50',
]
groups['log500'] = [
    'log_freq_train = 500',
    'log_freq_val = 500',
    'log_freq_test = 500',
]
groups['log5000'] = [
    'log_freq_train = 5000',
    'log_freq_val = 5000',
    'log_freq_test = 5000',
]
groups['no_logging'] = [
    'log_freq_train = 100000000000',
    'log_freq_val = 100000000000',
    'log_freq_test = 100000000000',
]

############## pretrained nets ##############
# NOTE: the original carried a long commented-out catalog of pretrained
# groups here (sigen3d, conf, up3D, center, seg, motionreg, gen3d, vq2d,
# vq3d, feat2D); only feat3d and occ are live.
groups['pretrained_feat3d'] = [
    'do_feat3d = True',
    'feat3d_init = "' + pret_carla.feat3d_init + '"',
    'feat3d_dim = %d' % pret_carla.feat3d_dim,
]
groups['pretrained_occ'] = [
    'do_occ = True',
    'occ_init = "' + pret_carla.occ_init + '"',
]
# NOTE: more commented-out dead groups followed in the original (match,
# rigid, pri2D, det, forecast, view, flow, tow, emb2D, preocc, vis,
# total_init, pretrained_optim, and the frozen_* family); condensed away.
/exp_carla_det.py
from exp_base import * ############## choose an experiment ############## current = 'det_builder' current = 'det_trainer' mod = '"det00"' # go mod = '"det01"' # rescore with inbound mod = '"det02"' # show scores mod = '"det03"' # show bev too mod = '"det04"' # narrower bounds, to see mod = '"det05"' # print scorelists mod = '"det06"' # rescore actually mod = '"det07"' # print score no matter what mod = '"det08"' # float2str mod = '"det09"' # run feat3d mod = '"det10"' # really run feat3d mod = '"det11"' # get axboxlist mod = '"det12"' # solid centorid mod = '"det13"' # update lrtlist util mod = '"det14"' # run detnet mod = '"det15"' # mod = '"det16"' # train a whiel mod = '"det17"' # bugfix mod = '"det18"' # return early if score < B/2 mod = '"det19"' # new utils mod = '"det20"' # B2 mod = '"det21"' # B4 mod = '"det22"' # clean up mod = '"det23"' # rand centroid mod = '"det24"' # padding 0 mod = '"det25"' # avoid warping to R0 mod = '"det26"' # scorelist *= inbound mod = '"det27"' # use scorelist in the vis mod = '"det28"' # only draw nonzero boxes mod = '"det29"' # cleaned up mod = '"det30"' # do not draw 0,1 scores mod = '"det31"' # cleaned up mod = '"det32"' # evaluate against axlrtlist mod = '"det33"' # only show a fixed number of digits mod = '"det34"' # fix that mod = '"det35"' # maxlen=3 mod = '"det36"' # log500 ############## define experiments ############## exps['det_builder'] = [ 'carla_det', # mode 'carla_multiview_10_data', # dataset 'seqlen1', '8-4-8_bounds', # '16-8-16_bounds', '3_iters', # '5k_iters', # 'lr3', 'train_feat3d', 'train_det', 'B1', 'no_shuf', 'no_backprop', # 'log50', 'log1', ] exps['det_trainer'] = [ 'carla_det', # mode # 'carla_multiview_10_data', # dataset 'carla_multiview_train_data', # dataset 'seqlen1', # 'carla_multiview_train_val_data', # dataset '16-8-16_bounds', # 'carla_16-8-16_bounds_train', # 'carla_16-8-16_bounds_val', '200k_iters', 'lr3', 'B4', 'train_feat3d', 'train_det', 'log500', ] ############## group configs 
############## groups['carla_det'] = ['do_carla_det = True'] ############## datasets ############## # DHW for mem stuff SIZE = 32 Z = int(SIZE*4) Y = int(SIZE*2) X = int(SIZE*4) K = 8 # how many proposals to consider # H and W for proj stuff PH = int(H/2.0) PW = int(W/2.0) # S = 1 # groups['carla_multiview_10_data'] = [ # 'dataset_name = "carla"', # 'H = %d' % H, # 'W = %d' % W, # 'trainset = "mags7i3ten"', # 'trainset_format = "multiview"', # 'trainset_seqlen = %d' % S, # 'dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"', # 'dataset_filetype = "npz"' # ] # groups['carla_multiview_train_val_data'] = [ # 'dataset_name = "carla"', # 'H = %d' % H, # 'W = %d' % W, # 'trainset = "mags7i3t"', # 'trainset_format = "multiview"', # 'trainset_seqlen = %d' % S, # 'valset = "mags7i3v"', # 'valset_format = "multiview"', # 'valset_seqlen = %d' % S, # 'dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"', # 'dataset_filetype = "npz"' # ] ############## verify and execute ############## def _verify_(s): varname, eq, val = s.split(' ') assert varname in globals() assert eq == '=' assert type(s) is type('') print(current) assert current in exps for group in exps[current]: print(" " + group) assert group in groups for s in groups[group]: print(" " + s) _verify_(s) exec(s) s = "mod = " + mod _verify_(s) exec(s)
/exp_carla_ego.py
# exp_carla_ego.py: experiment definitions for CARLA_EGO mode
# (egomotion estimation on trajectory data). Exec'd by hyperparams.py.
from exp_base import *

############## choose an experiment ##############

current = 'builder'
current = 'debugger'
current = 'trainer'  # last assignment wins

# `mod` run history: only the final assignment takes effect.
mod = '"eg00"' # nothing; builder
mod = '"eg01"' # deleted junk
mod = '"eg02"' # added hyps
mod = '"eg03"' # train a while
mod = '"eg04"' # 1 scale
mod = '"eg05"' # no synth
mod = '"eg06"' # consec=True
mod = '"eg07"' # comment out the synth part < ok. but this npz has no motion
mod = '"eg08"' # second file < a bit jumpier than i would like...
mod = '"eg09"' # S = 3
mod = '"eg10"' # make my own thing; assert S==2 < ok, much cleaner, but still jumpy
mod = '"eg11"' # cleaned up summs
mod = '"eg12"' # cleaned up summs; include the occ transform
mod = '"eg13"' # removed the warp loss
mod = '"eg14"' # add summ of the gt
mod = '"eg15"' # fix the hyps
mod = '"eg16"' # renamed DHW as ZYX
mod = '"eg17"' # same, fewer prints
mod = '"eg18"' # feed rgbd input
mod = '"eg19"' # cleaned up
mod = '"eg20"' # train a while

############## exps ##############

# NOTE(review): 'carla_bounds' is referenced below but its group definition
# is not visible in this file or exp_base — presumably defined elsewhere;
# verify before running.
exps['builder'] = [
    'carla_ego', # mode
    'carla_traj_10_data', # dataset
    'carla_bounds',
    '3_iters',
    'lr0',
    'B1',
    'no_shuf',
    'train_feat3d',
    'train_ego',
    'log1',
]
exps['debugger'] = [
    'carla_ego', # mode
    'carla_traj_1_data', # dataset
    'carla_bounds',
    '1k_iters',
    'lr4',
    'B1',
    'train_feat3d',
    'train_ego',
    'no_shuf',
    'log10',
]
exps['trainer'] = [
    'carla_ego', # mode
    'carla_traj_train_data', # dataset
    'carla_bounds',
    '100k_iters',
    'lr4',
    'B2',
    'train_feat3d',
    'train_ego',
    'log50',
]

############## groups ##############

groups['carla_ego'] = ['do_carla_ego = True']
# overrides the train_feat3d group declared in exp_base
groups['train_feat3d'] = [
    'do_feat3d = True',
    'feat3d_dim = 32',
]
groups['train_ego'] = [
    'do_ego = True',
    'ego_t_l2_coeff = 1.0',
    'ego_deg_l2_coeff = 1.0',
    'ego_num_scales = 2',
    'ego_num_rots = 11',
    'ego_max_deg = 4.0',
    'ego_max_disp_z = 2',
    'ego_max_disp_y = 1',
    'ego_max_disp_x = 2',
    'ego_synth_prob = 0.0',
]

############## datasets ##############

# dims for mem
SIZE = 32
Z = int(SIZE*4)
Y = int(SIZE*1)
X = int(SIZE*4)
K = 2 # how many objects to consider
N = 8 # how many objects per npz
S = 2
H = 128
W = 384
# H and W for proj stuff
PH = int(H/2.0)
PW = int(W/2.0)

dataset_location = "/projects/katefgroup/datasets/carla/processed/npzs"

groups['carla_traj_1_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "taqs100i2one"',
    'trainset_format = "traj"',
    'trainset_consec = True',
    'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_traj_10_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "taqs100i2ten"',
    'trainset_format = "traj"',
    'trainset_consec = True',
    'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]
groups['carla_traj_train_data'] = [
    'dataset_name = "carla"',
    'H = %d' % H,
    'W = %d' % W,
    'trainset = "taqs100i2t"',
    'trainset_format = "traj"',
    'trainset_consec = True',
    'trainset_seqlen = %d' % S,
    'dataset_location = "%s"' % dataset_location,
    'dataset_filetype = "npz"'
]

############## verify and execute ##############

def _verify_(s):
    # every config string must look like "<already-declared-var> = <value>"
    varname, eq, val = s.split(' ')
    assert varname in globals()
    assert eq == '='
    assert type(s) is type('')

print(current)
assert current in exps
for group in exps[current]:
    print(" " + group)
    assert group in groups
    for s in groups[group]:
        print(" " + s)
        _verify_(s)
        exec(s)  # apply the setting into this (hyperparams) namespace
s = "mod = " + mod
_verify_(s)
exec(s)
/exp_carla_static.py
# exp_carla_static.py: experiment definitions for CARLA_STATIC mode
# (static-scene 3d feature learning). Exec'd by hyperparams.py.
from exp_base import *

############## choose an experiment ##############

current = 'builder'
current = 'trainer'  # last assignment wins
# current = 'tester_basic'

# `mod` run history: only the final assignment takes effect.
mod = '"sta00"' # nothing; builder
mod = '"sta01"' # just prep and return
mod = '"sta02"' # again, fewer prints
mod = '"sta03"' # run feat3d forward; drop the sparse stuff
mod = '"sta04"' # really run it
mod = '"sta05"' # again
mod = '"sta06"' # warp; show altfeat
mod = '"sta07"' # ensure either ==1 or a==b
mod = '"sta08"' # try emb
mod = '"sta09"' # train a while
mod = '"sta10"' #
mod = '"sta11"' # show altfeat input
mod = '"sta12"' #
mod = '"sta13"' # train occ
mod = '"sta14"' # move things to R
mod = '"sta14"' # do view (NOTE: duplicate tag in the original)
mod = '"sta15"' # encode in X0
mod = '"sta16"' #
mod = '"sta17"' # show rgb_camX1, so i can understand the inbound idea better
mod = '"sta18"' # show inbound separately
mod = '"sta19"' # allow 0 to 32m
mod = '"sta20"' # builder
mod = '"sta21"' # show occ_memXs
mod = '"sta22"' # wider bounds please
mod = '"sta23"' # properly combine bounds with centorid
mod = '"sta24"' # train a hwile
mod = '"sta25"' # same but encode in Xs and warp to R then X0
mod = '"sta26"' # use resnet3d
mod = '"sta27"' # skipnet; randomize the centroid a bit
mod = '"sta28"' # wider rand, and inbound check
mod = '"sta29"' # handle the false return
mod = '"sta30"' # add emb2d
mod = '"sta31"' # freeze the slow model
mod = '"sta32"' # 2d parts
mod = '"sta33"' # fewer prints
mod = '"sta34"' # nice suffixes; JUST 2d learning
mod = '"sta35"' # fix bug
mod = '"sta36"' # better summ suffix
mod = '"sta37"' # tell me about neg pool size
mod = '"sta38"' # fix small bug in the hyp lettering
mod = '"sta39"' # cleaned up hyps
mod = '"sta40"' # weak smooth coeff on feats
mod = '"sta41"' # run occnet on altfeat instead
mod = '"sta42"' # redo
mod = '"sta43"' # replication padding
mod = '"sta44"' # pret 170k 02_s2_m128x32x128_p64x192_1e-3_F2_d32_F3_d32_s.01_O_c1_s.01_V_d32_e1_E2_e.1_n4_d32_c1_E3_n2_c1_mags7i3t_sta41
mod = '"sta45"' # inspect and maybe fix the loading; log10
mod = '"sta46"' # init slow in model base after saverloader
mod = '"sta47"' # zero padding; log500
mod = '"sta48"' # replication padding; log500
mod = '"sta49"' # repeat after deleting some code
mod = '"sta50"' # pret 02_s2_m128x32x128_1e-3_F3_d32_s.01_O_c2_s.1_E3_n2_c.1_mags7i3t_sta48
mod = '"sta51"' # same deal after some cleanup

############## exps ##############

exps['builder'] = [
    'carla_static', # mode
    'carla_multiview_10_data', # dataset
    '16-4-16_bounds',
    '3_iters',
    'lr0',
    'B1',
    'no_shuf',
    'train_feat3d',
    # 'train_occ',
    # 'train_view',
    # 'train_emb2d',
    # 'train_emb3d',
    'log1',
]
exps['trainer'] = [
    'carla_static', # mode
    'carla_multiview_train_data', # dataset
    '16-4-16_bounds',
    '300k_iters',
    'lr3',
    'B2',
    'pretrained_feat3d',
    'pretrained_occ',
    'train_feat3d',
    'train_emb3d',
    'train_occ',
    # 'train_view',
    # 'train_feat2d',
    # 'train_emb2d',
    'log500',
]

############## groups ##############

groups['carla_static'] = ['do_carla_static = True']
groups['train_feat2d'] = [
    'do_feat2d = True',
    'feat2d_dim = 32',
    # 'feat2d_smooth_coeff = 0.1',
]
groups['train_occ'] = [
    'do_occ = True',
    'occ_coeff = 2.0',
    'occ_smooth_coeff = 0.1',
]
groups['train_view'] = [
    'do_view = True',
    'view_depth = 32',
    'view_l1_coeff = 1.0',
]
groups['train_emb2d'] = [
    'do_emb2d = True',
    # 'emb2d_smooth_coeff = 0.01',
    'emb2d_ce_coeff = 1.0',
    'emb2d_l2_coeff = 0.1',
    'emb2d_mindist = 32.0',
    'emb2d_num_samples = 4',
    # 'do_view = True',
    # 'view_depth = 32',
    # 'view_l1_coeff = 1.0',
]
groups['train_emb3d'] = [
    'do_emb3d = True',
    'emb3d_ce_coeff = 0.1',
    # 'emb3d_mindist = 8.0',
    # 'emb3d_l2_coeff = 0.1',
    'emb3d_num_samples = 2',
]

############## datasets ##############

# dims for mem
SIZE = 32
Z = int(SIZE*4)
Y = int(SIZE*1)
X = int(SIZE*4)
K = 2 # how many objects to consider
S = 2
H = 128
W = 384
# H and W for proj stuff
PH = int(H/2.0)
PW = int(W/2.0)

############## verify and execute ##############

def _verify_(s):
    # every config string must look like "<already-declared-var> = <value>"
    varname, eq, val = s.split(' ')
    assert varname in globals()
    assert eq == '='
    assert type(s) is type('')

print(current)
assert current in exps
for group in exps[current]:
    print(" " + group)
    assert group in groups
    for s in groups[group]:
        print(" " + s)
        _verify_(s)
        exec(s)  # apply the setting into this (hyperparams) namespace
s = "mod = " + mod
_verify_(s)
exec(s)
/hyperparams.py
# hyperparams.py (defaults section): every tunable has a module-level
# default here; the exp_carla_* files exec "var = value" strings over
# these, and the tail of this module derives the run name from the result.
import os
# from munch import Munch

H = 240 # height
W = 320 # width

# memory volume resolution (voxels), per split
Z = 128
Y = 64
X = 128
Z_val = 128
Y_val = 64
X_val = 128
Z_test = 128
Y_test = 64
X_test = 128

# H and W for projection/render targets
PH = int(128/4)
PW = int(384/4)

ZY = 32
ZX = 32
ZZ = 32

N = 8 # number of boxes per npz
K = 1 # number of boxes to actually use
# S = 2 # seq length
# S_test = 3 # seq length
T = 256 # height & width of birdview map
V = 100000 # num velodyne points

# metric bounds of mem space, per split (meters)
XMIN = -16.0 # right (neg is left)
XMAX = 16.0 # right
YMIN = -1.0 # down (neg is up)
YMAX = 3.0 # down
ZMIN = 2.0 # forward
ZMAX = 34.0 # forward
XMIN_val = -16.0 # right (neg is left)
XMAX_val = 16.0 # right
YMIN_val = -1.0 # down (neg is up)
YMAX_val = 3.0 # down
ZMIN_val = 2.0 # forward
ZMAX_val = 34.0 # forward
XMIN_test = -16.0 # right (neg is left)
XMAX_test = 16.0 # right
YMIN_test = -1.0 # down (neg is up)
YMAX_test = 3.0 # down
ZMIN_test = 2.0 # forward
ZMAX_test = 34.0 # forward
FLOOR = 2.65 # ground (2.65m downward from the cam)
CEIL = (FLOOR-2.0)

#----------- loading -----------#
do_include_summs = False
do_include_vis = True
do_test = False
do_export_vis = False
do_export_stats = False
do_export_inds = False
# per-net checkpoint init paths (empty = train from scratch)
emb2d_init = ""
feat2d_init = ""
feat3d_init = ""
flow_init = ""
occ_init = ""
view_init = ""
ego_init = ""
det_init = ""
total_init = ""
reset_iter = False
do_freeze_emb2d = False
do_freeze_feat2d = False
do_freeze_feat3d = False
do_freeze_occ = False
do_freeze_view = False
do_freeze_flow = False
do_freeze_ego = False
do_freeze_det = False
do_resume = False
# by default, only backprop on "train" iters
backprop_on_train = True
backprop_on_val = False
backprop_on_test = False

#----------- net design -----------#
# by default, run nothing
do_emb2d = False
do_emb3d = False
do_feat2d = False
do_feat3d = False
do_occ = False
do_view = False
do_flow = False
do_ego = False
do_det = False

#----------- general hypers -----------#
lr = 0.0

#----------- emb hypers -----------#
emb2d_ml_coeff = 0.0
emb3d_ml_coeff = 0.0
emb2d_l2_coeff = 0.0
emb3d_l2_coeff = 0.0
emb2d_mindist = 0.0
emb3d_mindist = 0.0
emb2d_num_samples = 0
emb3d_num_samples = 0
emb2d_ce_coeff = 0.0
emb3d_ce_coeff = 0.0

#----------- feat3d hypers -----------#
feat3d_dim = 32
feat3d_smooth_coeff = 0.0

#----------- feat2d hypers -----------#
feat2d_smooth_coeff = 0.0
feat2d_dim = 8

#----------- occ hypers -----------#
occ_coeff = 0.0
occ_smooth_coeff = 0.0

#----------- view hypers -----------#
view_depth = 64
view_accu_render = False
view_accu_render_unps = False
view_accu_render_gt = False
view_pred_embs = False
view_pred_rgb = False
view_l1_coeff = 0.0

#----------- det hypers -----------#
# (the original declared this section twice with identical values;
# the duplicate has been removed)
det_anchor_size = 12.0
det_prob_coeff = 0.0
det_reg_coeff = 0.0

#----------- flow hypers -----------#
flow_warp_coeff = 0.0
flow_warp_g_coeff = 0.0
flow_cycle_coeff = 0.0
flow_smooth_coeff = 0.0
flow_l1_coeff = 0.0
flow_l2_coeff = 0.0
# flow_synth_l1_coeff = 0.0
# flow_synth_l2_coeff = 0.0
flow_do_synth_rt = False
flow_heatmap_size = 4

#----------- ego hypers -----------#
ego_num_scales = 1
ego_num_rots = 0
ego_max_disp_z = 0
ego_max_disp_y = 0
ego_max_disp_x = 0
ego_max_deg = 0.0
ego_t_l2_coeff = 0.0
ego_deg_l2_coeff = 0.0
ego_synth_prob = 0.0

#----------- mod -----------#
mod = '""'

############ slower-to-change hyperparams below here ############

## logging
log_freq_train = 100
log_freq_val = 100
log_freq_test = 100
snap_freq = 10000

max_iters = 10000
shuffle_train = True
shuffle_val = True
shuffle_test = True

trainset_format = 'seq'
valset_format = 'seq'
testset_format = 'seq'
# should the seqdim be taken in consecutive order
trainset_consec = True
valset_consec = True
testset_consec = True

trainset_seqlen = 2
valset_seqlen = 2
testset_seqlen = 2

trainset_batch_size = 2
valset_batch_size = 1
testset_batch_size = 1

dataset_name = ""
seqname = ""
ind_dataset = ''

trainset = ""
valset = ""
testset = ""
dataset_location = ""
dataset_filetype = "npz"

# mode selection: the chosen exp file flips exactly one of these
do_carla_static = False
do_carla_det = False
do_carla_ego = False

############ rev up the experiment ############

# MODE picks which exp_carla_* file gets exec'd over the defaults above
mode = os.environ["MODE"]
print('os.environ mode is %s' % mode)
if mode=="CARLA_STATIC":
    exec(compile(open('exp_carla_static.py').read(), 'exp_carla_static.py', 'exec'))
elif mode=="CARLA_DET":
    exec(compile(open('exp_carla_det.py').read(), 'exp_carla_det.py', 'exec'))
elif mode=="CARLA_EGO":
    exec(compile(open('exp_carla_ego.py').read(), 'exp_carla_ego.py', 'exec'))
else:
    assert(False) # what mode is this?

############ make some final adjustments ############

trainset_path = "%s/%s.txt" % (dataset_location, trainset)
valset_path = "%s/%s.txt" % (dataset_location, valset)
testset_path = "%s/%s.txt" % (dataset_location, testset)

# per-set lookup tables used by the training loop
data_paths = {}
data_paths['train'] = trainset_path
data_paths['val'] = valset_path
data_paths['test'] = testset_path

set_nums = {}
set_nums['train'] = 0
set_nums['val'] = 1
set_nums['test'] = 2

set_names = ['train', 'val', 'test']

log_freqs = {}
log_freqs['train'] = log_freq_train
log_freqs['val'] = log_freq_val
log_freqs['test'] = log_freq_test

shuffles = {}
shuffles['train'] = shuffle_train
shuffles['val'] = shuffle_val
shuffles['test'] = shuffle_test

data_formats = {}
data_formats['train'] = trainset_format
data_formats['val'] = valset_format
data_formats['test'] = testset_format

data_consecs = {}
data_consecs['train'] = trainset_consec
data_consecs['val'] = valset_consec
data_consecs['test'] = testset_consec

seqlens = {}
seqlens['train'] = trainset_seqlen
seqlens['val'] = valset_seqlen
seqlens['test'] = testset_seqlen

batch_sizes = {}
batch_sizes['train'] = trainset_batch_size
batch_sizes['val'] = valset_batch_size
batch_sizes['test'] = testset_batch_size

############ autogen a name; don't touch any hypers! ############

def strnum(x):
    """Compact coeff formatting for the run name: floats keep only the
    fractional part (0.01 -> ".01"), integers print as-is."""
    s = '%g' % x
    if '.' in s:
        s = s[s.index('.'):]
    return s

if do_test:
    name = "%02d_s%d" % (testset_batch_size, trainset_seqlen)
    name += "_m%dx%dx%d" % (Z_test, Y_test, X_test)
else:
    name = "%02d_s%d" % (trainset_batch_size, trainset_seqlen)
    # NOTE(review): nesting of the next two ifs is reconstructed; they sit
    # inside the else branch so test-mode names keep the _test resolution.
    if do_feat3d:
        name += "_m%dx%dx%d" % (Z, Y, X)
    if do_view or do_emb2d:
        name += "_p%dx%d" % (PH,PW)

if lr > 0.0:
    lrn = "%.1e" % lr
    # e.g., 5.0e-04 becomes "5e-4"
    lrn = lrn[0] + lrn[3:5] + lrn[-1]
    name += "_%s" % lrn

# each active net appends its tag, an "f" when frozen, and any nonzero coeffs
if do_feat2d:
    name += "_F2"
    if do_freeze_feat2d:
        name += "f"
    coeffs = [feat2d_dim,
              feat2d_smooth_coeff,
    ]
    prefixes = ["d",
                "s",
    ]
    for l_, l in enumerate(coeffs):
        if l > 0:
            name += "_%s%s" % (prefixes[l_],strnum(l))

if do_feat3d:
    name += "_F3"
    if do_freeze_feat3d:
        name += "f"
    coeffs = [feat3d_dim,
              feat3d_smooth_coeff,
    ]
    prefixes = ["d",
                "s",
    ]
    for l_, l in enumerate(coeffs):
        if l > 0:
            name += "_%s%s" % (prefixes[l_],strnum(l))

if do_ego:
    name += '_G_%dx%dx%dx%dx%d' % (
        ego_num_scales,
        ego_num_rots,
        ego_max_disp_z,
        ego_max_disp_y,
        ego_max_disp_x,
    )
    if do_freeze_ego:
        name += "f"
    ego_coeffs = [ego_max_deg,
                  ego_t_l2_coeff,
                  ego_deg_l2_coeff,
                  ego_synth_prob,
    ]
    ego_prefixes = ["r",
                    "t",
                    "d",
                    "p",
    ]
    for l_, l in enumerate(ego_coeffs):
        if l > 0:
            name += "_%s%s" % (ego_prefixes[l_],strnum(l))

if do_det:
    name += "_D"
    name += "%d" % det_anchor_size
    if do_freeze_det:
        name += "f"
    det_coeffs = [det_prob_coeff,
                  det_reg_coeff,
    ]
    det_prefixes = ["p",
                    "r",
    ]
    for l_, l in enumerate(det_coeffs):
        if l > 0:
            name += "_%s%s" % (det_prefixes[l_],strnum(l))

if do_occ:
    name += "_O"
    if do_freeze_occ:
        name += "f"
    occ_coeffs = [occ_coeff,
                  occ_smooth_coeff,
    ]
    occ_prefixes = ["c",
                    "s",
    ]
    for l_, l in enumerate(occ_coeffs):
        if l > 0:
            name += "_%s%s" % (occ_prefixes[l_],strnum(l))

if do_view:
    name += "_V"
    if view_pred_embs:
        name += "e"
    if view_pred_rgb:
        name += "r"
    if do_freeze_view:
        name += "f"
    view_coeffs = [view_depth,
                   view_l1_coeff,
    ]
    view_prefixes = ["d",
                     "e",
    ]
    for l_, l in enumerate(view_coeffs):
        if l > 0:
            name += "_%s%s" % (view_prefixes[l_],strnum(l))

# NOTE: the original repeated the whole `if do_det:` suffix section a second
# time here (copy-paste duplicate), which made "_D..." appear twice in the
# run name whenever do_det was set; the duplicate has been removed.

if do_emb2d:
    name += "_E2"
    if do_freeze_emb2d:
        name += "f"
    coeffs = [emb2d_ml_coeff,
              emb2d_l2_coeff,
              emb2d_num_samples,
              emb2d_mindist,
              emb2d_ce_coeff,
    ]
    prefixes = ["m",
                "e",
                "n",
                "d",
                "c",
    ]
    for l_, l in enumerate(coeffs):
        if l > 0:
            name += "_%s%s" % (prefixes[l_],strnum(l))

if do_emb3d:
    name += "_E3"
    coeffs = [emb3d_ml_coeff,
              emb3d_l2_coeff,
              emb3d_num_samples,
              emb3d_mindist,
              emb3d_ce_coeff,
    ]
    prefixes = ["m",
                "e",
                "n",
                "d",
                "c",
    ]
    for l_, l in enumerate(coeffs):
        if l > 0:
            name += "_%s%s" % (prefixes[l_],strnum(l))

if do_flow:
    name += "_F"
    if do_freeze_flow:
        name += "f"
    else:
        flow_coeffs = [flow_heatmap_size,
                       flow_warp_coeff,
                       flow_warp_g_coeff,
                       flow_cycle_coeff,
                       flow_smooth_coeff,
                       flow_l1_coeff,
                       flow_l2_coeff,
                       # flow_synth_l1_coeff,
                       # flow_synth_l2_coeff,
        ]
        flow_prefixes = ["h",
                         "w",
                         "g",
                         "c",
                         "s",
                         "e",
                         "f",
                         # "y",
                         # "x",
        ]
        for l_, l in enumerate(flow_coeffs):
            if l > 0:
                name += "_%s%s" % (flow_prefixes[l_],strnum(l))

##### end model description

# add some training data info

sets_to_run = {}
if trainset:
    name = "%s_%s" % (name, trainset)
    sets_to_run['train'] = True
else:
    sets_to_run['train'] = False
if valset:
    name = "%s_%s" % (name, valset)
    sets_to_run['val'] = True
else:
    sets_to_run['val'] = False
if testset:
    name = "%s_%s" % (name, testset)
    sets_to_run['test'] = True
else:
    sets_to_run['test'] = False

sets_to_backprop = {}
sets_to_backprop['train'] = backprop_on_train
sets_to_backprop['val'] = backprop_on_val
sets_to_backprop['test'] = backprop_on_test

if (not shuffle_train) or (not shuffle_val) or (not shuffle_test):
    name += "_ns"

if mod:
    name = "%s_%s" % (name, mod)

# NOTE(review): nesting reconstructed — on resume the name gets the '_gt'
# marker and total_init points the loader at this run's own checkpoints.
if do_resume:
    name += '_gt'
    total_init = name

print(name)
/main.py
from model_carla_static import CARLA_STATIC
from model_carla_ego import CARLA_EGO
from model_carla_det import CARLA_DET
import hyperparams as hyp
import os
import logging

# Use the module-level factory: instantiating logging.Logger directly (as the
# original did) creates a logger detached from the logging hierarchy, so
# handlers/config set via logging.basicConfig never apply to it.
logger = logging.getLogger('catch_all')


def main():
    """Instantiate the model selected by the hyperparams mode flags and run it.

    Exactly one of hyp.do_carla_static / do_carla_ego / do_carla_det is
    expected to be set; that choice picks both the log directory and the
    model class. Any exception (including KeyboardInterrupt) is logged and
    followed by a cleanup pass that deletes empty log files.
    """
    checkpoint_dir_ = os.path.join("checkpoints", hyp.name)
    if hyp.do_carla_static:
        log_dir_ = os.path.join("logs_carla_static", hyp.name)
    elif hyp.do_carla_ego:
        log_dir_ = os.path.join("logs_carla_ego", hyp.name)
    elif hyp.do_carla_det:
        log_dir_ = os.path.join("logs_carla_det", hyp.name)
    else:
        assert(False) # what mode is this?

    # exist_ok avoids the check-then-create race of the original
    # "if not os.path.exists(...): os.makedirs(...)" pattern.
    os.makedirs(checkpoint_dir_, exist_ok=True)
    os.makedirs(log_dir_, exist_ok=True)

    try:
        if hyp.do_carla_static:
            model = CARLA_STATIC(
                checkpoint_dir=checkpoint_dir_,
                log_dir=log_dir_)
            model.go()
        elif hyp.do_carla_ego:
            model = CARLA_EGO(
                checkpoint_dir=checkpoint_dir_,
                log_dir=log_dir_)
            model.go()
        elif hyp.do_carla_det:
            model = CARLA_DET(
                checkpoint_dir=checkpoint_dir_,
                log_dir=log_dir_)
            model.go()
        else:
            assert(False) # what mode is this?
    except (Exception, KeyboardInterrupt) as ex:
        logger.error(ex, exc_info=True)
        log_cleanup(log_dir_)


def log_cleanup(log_dir_):
    """Delete zero-byte files under each per-set log directory.

    Walks log_dir_/<set_name> for every set in hyp.set_names and removes
    files whose size is 0 (typically empty tensorboard event files left by
    an aborted run).
    """
    log_dirs = []
    for set_name in hyp.set_names:
        log_dirs.append(log_dir_ + '/' + set_name)
    for log_dir in log_dirs:
        for r, d, f in os.walk(log_dir):
            for file_name in f:
                # Join against the walk root `r`, not the top-level log_dir:
                # os.walk recurses, and filenames in `f` belong to `r`. The
                # original joined against log_dir, which mis-addresses files
                # that live in subdirectories.
                file_path = os.path.join(r, file_name)
                if os.stat(file_path).st_size == 0:
                    os.remove(file_path)


if __name__ == '__main__':
    main()
/model_carla_det.py
import time
import torch
import torch.nn as nn
import hyperparams as hyp
import numpy as np
import os

from model_base import Model
from nets.feat3dnet import Feat3dNet
from nets.detnet import DetNet

import utils.vox
import utils.samp
import utils.geom
import utils.misc
import utils.improc
import utils.basic
# import utils.track
# import frozen_flow_net
import utils.eval

# from tensorboardX import SummaryWriter
# from backend import saverloader, inputs
# from torchvision import datasets, transforms

np.set_printoptions(precision=2)
np.random.seed(0)

# EPS = 1e-6
# MAX_QUEUE = 10 # how many items before the summaryWriter flushes


class CARLA_DET(Model):
    """Training harness for the detection mode: builds CarlaDetModel and
    optionally freezes the featurizer and/or detector sub-nets."""

    def initialize_model(self):
        print("------ INITIALIZING MODEL OBJECTS ------")
        self.model = CarlaDetModel()
        if hyp.do_freeze_feat3d:
            self.model.feat3dnet.eval()
            self.set_requires_grad(self.model.feat3dnet, False)
        if hyp.do_freeze_det:
            self.model.detnet.eval()
            self.set_requires_grad(self.model.detnet, False)


class CarlaDetModel(nn.Module):
    """3D detection model: a 3d featurizer (Feat3dNet) followed by DetNet."""

    def __init__(self):
        super(CarlaDetModel, self).__init__()
        if hyp.do_feat3d:
            self.feat3dnet = Feat3dNet(in_dim=4)
        if hyp.do_det:
            self.detnet = DetNet()

    def prepare_common_tensors(self, feed):
        """Unpack the feed dict into tensors shared by the per-set runners.

        Sets up the summary writer, camera transforms, a (possibly randomized)
        voxel grid, GT box lists, and voxelized occupancy/color inputs as
        attributes on self.

        Returns True when the sample is usable, False when it should be
        skipped (no good scene centroid found, or too few valid boxes).
        """
        results = dict()
        self.summ_writer = utils.improc.Summ_writer(
            writer=feed['writer'],
            global_step=feed['global_step'],
            log_freq=feed['set_log_freq'],
            fps=8,
            just_gif=True)
        global_step = feed['global_step']
        self.B = feed['set_batch_size']
        self.S = feed['set_seqlen']
        self.set_name = feed['set_name']
        # in det mode, we do not have much reason to have S>1
        assert(self.S==1)
        # pack/unpack helpers fold the seq dim into the batch dim and back
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)
        self.N = hyp.N
        self.Z, self.Y, self.X = hyp.Z, hyp.Y, hyp.X
        self.Z2, self.Y2, self.X2 = int(self.Z/2), int(self.Y/2), int(self.X/2)
        self.pix_T_cams = feed['pix_T_cams']
        set_data_format = feed['set_data_format']
        self.S = feed['set_seqlen']
        self.origin_T_camRs = feed['origin_T_camRs']
        self.origin_T_camXs = feed['origin_T_camXs']
        self.camX0s_T_camXs = utils.geom.get_camM_T_camXs(self.origin_T_camXs, ind=0)
        self.camXs_T_camX0s = __u(utils.geom.safe_inverse(__p(self.camX0s_T_camXs)))
        self.camR0s_T_camRs = utils.geom.get_camM_T_camXs(self.origin_T_camRs, ind=0)
        self.camRs_T_camR0s = __u(utils.geom.safe_inverse(__p(self.camR0s_T_camRs)))
        self.camRs_T_camXs = __u(torch.matmul(__p(self.origin_T_camRs).inverse(), __p(self.origin_T_camXs)))
        self.camXs_T_camRs = __u(__p(self.camRs_T_camXs).inverse())
        self.xyz_camXs = feed['xyz_camXs']
        if self.set_name=='test' or self.set_name=='val':
            # fixed centroid, for reproducible eval
            scene_centroid_x = 0.0
            scene_centroid_y = 1.0
            scene_centroid_z = 18.0
            scene_centroid = np.array([scene_centroid_x,
                                       scene_centroid_y,
                                       scene_centroid_z]).reshape([1, 3])
            self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()
            self.vox_util = utils.vox.Vox_util(
                self.Z, self.Y, self.X, self.set_name,
                scene_centroid=self.scene_centroid,
                assert_cube=True)
        else:
            # randomize a bit, as a form of data aug
            all_ok = False
            num_tries = 0
            while (not all_ok) and (num_tries < 100):
                scene_centroid_x = np.random.uniform(-8.0, 8.0)
                scene_centroid_y = np.random.uniform(-1.0, 3.0)
                scene_centroid_z = np.random.uniform(10.0, 26.0)
                scene_centroid = np.array([scene_centroid_x,
                                           scene_centroid_y,
                                           scene_centroid_z]).reshape([1, 3])
                self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()
                num_tries += 1
                all_ok = True
                self.vox_util = utils.vox.Vox_util(
                    self.Z, self.Y, self.X, self.set_name,
                    scene_centroid=self.scene_centroid,
                    assert_cube=True)
                # we want to ensure this gives us a few points inbound for each element
                inb = __u(self.vox_util.get_inbounds(
                    __p(self.xyz_camXs), self.Z, self.Y, self.X, already_mem=False))
                # this is B x S x N
                num_inb = torch.sum(inb.float(), axis=2)
                # this is B x S
                if torch.min(num_inb) < 300:
                    all_ok = False
            self.summ_writer.summ_scalar('centroid_sampling/num_tries', float(num_tries))
            self.summ_writer.summ_scalar('centroid_sampling/num_inb', torch.mean(num_inb).cpu().item())
            if num_tries >= 100:
                return False # not OK; do not train on this
        self.vox_size_X = self.vox_util.default_vox_size_X
        self.vox_size_Y = self.vox_util.default_vox_size_Y
        self.vox_size_Z = self.vox_util.default_vox_size_Z
        origin_T_camRs_ = self.origin_T_camRs.reshape(
            self.B, self.S, 1, 4, 4).repeat(1, 1, self.N, 1, 1).reshape(
            self.B*self.S, self.N, 4, 4)
        boxlists = feed['boxlists']
        self.scorelist_s = feed['scorelists']
        self.tidlist_s = feed['tidlists']
        boxlists_ = boxlists.reshape(self.B*self.S, self.N, 9)
        lrtlist_camRs_ = utils.misc.parse_boxes(boxlists_, origin_T_camRs_)
        self.lrtlist_camRs = lrtlist_camRs_.reshape(self.B, self.S, self.N, 19)
        self.lrtlist_camR0s = __u(utils.geom.apply_4x4_to_lrtlist(
            __p(self.camR0s_T_camRs), __p(self.lrtlist_camRs)))
        self.lrtlist_camXs = __u(utils.geom.apply_4x4_to_lrtlist(
            __p(self.camXs_T_camRs), __p(self.lrtlist_camRs)))
        self.lrtlist_camX0s = __u(utils.geom.apply_4x4_to_lrtlist(
            __p(self.camX0s_T_camXs), __p(self.lrtlist_camXs)))
        # zero out the score of any box that falls out of bounds
        inbound_s = __u(utils.misc.rescore_lrtlist_with_inbound(
            __p(self.lrtlist_camX0s), __p(self.tidlist_s),
            self.Z, self.Y, self.X, self.vox_util))
        self.scorelist_s *= inbound_s
        # NOTE(review): the loop variable b is unused and the condition sums
        # over the whole batch; possibly scorelist_s[b,0] was intended.
        # Preserved as-is, since the net effect (skip when the batch has
        # fewer than B/2 valid boxes in total) may be intentional — confirm.
        for b in list(range(self.B)):
            if torch.sum(self.scorelist_s[:,0]) < (self.B/2):
                # not worth it; return early
                return False # not OK; do not train on this
        self.rgb_camXs = feed['rgb_camXs']
        self.summ_writer.summ_rgb('2d_inputs/rgb', self.rgb_camXs[:,0])
        # get 3d voxelized inputs
        self.occ_memXs = __u(self.vox_util.voxelize_xyz(
            __p(self.xyz_camXs), self.Z, self.Y, self.X))
        self.unp_memXs = __u(self.vox_util.unproject_rgb_to_mem(
            __p(self.rgb_camXs), self.Z, self.Y, self.X, __p(self.pix_T_cams)))
        # these are B x C x Z x Y x X
        self.summ_writer.summ_occs('3d_inputs/occ_memXs', torch.unbind(self.occ_memXs, dim=1))
        self.summ_writer.summ_unps('3d_inputs/unp_memXs',
                                   torch.unbind(self.unp_memXs, dim=1),
                                   torch.unbind(self.occ_memXs, dim=1))
        return True # OK

    def run_train(self, feed):
        """One training step: featurize, run the detector, log summaries.

        Returns (total_loss, results, False); the final flag means
        "do not skip this iteration".
        """
        total_loss = torch.tensor(0.0).cuda()
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)
        results = dict()

        # eliminate the seq dim, to make life easier
        lrtlist_camX = self.lrtlist_camXs[:, 0]
        rgb_camX0 = self.rgb_camXs[:,0]
        occ_memX0 = self.occ_memXs[:,0]
        unp_memX0 = self.unp_memXs[:,0]
        tidlist_g = self.tidlist_s[:,0]
        scorelist_g = self.scorelist_s[:,0]

        if hyp.do_feat3d:
            # start with a 4-channel feature map;
            feat_memX0_input = torch.cat([
                occ_memX0,
                unp_memX0*occ_memX0,
            ], dim=1)
            # featurize
            feat3d_loss, feat_halfmemX0 = self.feat3dnet(
                feat_memX0_input, self.summ_writer)
            total_loss += feat3d_loss
            self.summ_writer.summ_feat('feat3d/feat_memX0_input', feat_memX0_input, pca=True)
            self.summ_writer.summ_feat('feat3d/feat_halfmemX0', feat_halfmemX0, pca=True)

        if hyp.do_det:
            # this detector can only handle axis-aligned boxes (like rcnn)
            # so first let's inflate the boxes to the nearest axis lines
            axlrtlist_camX = utils.geom.inflate_to_axis_aligned_lrtlist(lrtlist_camX)
            lrtlist_memX = self.vox_util.apply_mem_T_ref_to_lrtlist(
                lrtlist_camX, self.Z, self.Y, self.X)
            axlrtlist_memX = utils.geom.inflate_to_axis_aligned_lrtlist(lrtlist_memX)
            self.summ_writer.summ_lrtlist_bev(
                'det/boxlist_g',
                occ_memX0[0:1],
                lrtlist_memX[0:1],
                scorelist_g,
                tidlist_g,
                self.vox_util,
                already_mem=True)
            self.summ_writer.summ_lrtlist_bev(
                'det/axboxlist_g',
                occ_memX0[0:1],
                axlrtlist_memX[0:1],
                scorelist_g,
                tidlist_g,
                self.vox_util,
                already_mem=True)
            # the detector operates at half resolution
            lrtlist_halfmemX = self.vox_util.apply_mem_T_ref_to_lrtlist(
                lrtlist_camX, self.Z2, self.Y2, self.X2)
            axlrtlist_halfmemX = utils.geom.inflate_to_axis_aligned_lrtlist(lrtlist_halfmemX)
            det_loss, boxlist_halfmemX_e, scorelist_e, tidlist_e, pred_objectness, sco, ove = self.detnet(
                axlrtlist_halfmemX,
                scorelist_g,
                feat_halfmemX0,
                self.summ_writer)
            total_loss += det_loss
            # map predictions back to camera coords for eval/vis
            lrtlist_halfmemX_e = utils.geom.convert_boxlist_to_lrtlist(boxlist_halfmemX_e)
            lrtlist_camX_e = self.vox_util.apply_ref_T_mem_to_lrtlist(
                lrtlist_halfmemX_e, self.Z2, self.Y2, self.X2)
            lrtlist_e = lrtlist_camX_e[0:1]
            # lrtlist_g = lrtlist_camX[0:1] # true boxes
            lrtlist_g = axlrtlist_camX[0:1] # axis-aligned boxes
            scorelist_e = scorelist_e[0:1]
            scorelist_g = scorelist_g[0:1]
            lrtlist_e, lrtlist_g, scorelist_e, scorelist_g = utils.eval.drop_invalid_lrts(
                lrtlist_e, lrtlist_g, scorelist_e, scorelist_g)
            lenlist_e, _ = utils.geom.split_lrtlist(lrtlist_e)
            clist_e = utils.geom.get_clist_from_lrtlist(lrtlist_e)
            lenlist_g, _ = utils.geom.split_lrtlist(lrtlist_g)
            clist_g = utils.geom.get_clist_from_lrtlist(lrtlist_g)
            _, Ne, _ = list(lrtlist_e.shape)
            _, Ng, _ = list(lrtlist_g.shape)
            # only summ if there is at least one pred and one gt
            if Ne > 0 and Ng > 0:
                # compute the all-pairs iou, then for each pred keep its best gt match
                lrtlist_e_ = lrtlist_e.unsqueeze(2).repeat(1, 1, Ng, 1).reshape(1, Ne * Ng, -1)
                lrtlist_g_ = lrtlist_g.unsqueeze(1).repeat(1, Ne, 1, 1).reshape(1, Ne * Ng, -1)
                ious, _ = utils.geom.get_iou_from_corresponded_lrtlists(lrtlist_e_, lrtlist_g_)
                ious = ious.reshape(1, Ne, Ng)
                ious_e = torch.max(ious, dim=2)[0]
                self.summ_writer.summ_lrtlist(
                    'det/boxlist_eg',
                    rgb_camX0[0:1],
                    torch.cat((lrtlist_g, lrtlist_e), dim=1),
                    torch.cat((ious_e.new_ones(1, Ng), ious_e), dim=1),
                    torch.cat([torch.ones(1, Ng).long().cuda(),
                               torch.ones(1, Ne).long().cuda()+1], dim=1),
                    self.pix_T_cams[0:1, 0])
                self.summ_writer.summ_lrtlist_bev(
                    'det/boxlist_bev_eg',
                    occ_memX0[0:1],
                    torch.cat((lrtlist_g, lrtlist_e), dim=1),
                    torch.cat((ious_e.new_ones(1, Ng), ious_e), dim=1),
                    torch.cat([torch.ones(1, Ng).long().cuda(),
                               torch.ones(1, Ne).long().cuda()+1], dim=1),
                    self.vox_util,
                    already_mem=False)
            ious = [0.3, 0.4, 0.5, 0.6, 0.7]
            maps_3d, maps_2d = utils.eval.get_mAP_from_lrtlist(
                lrtlist_e, scorelist_e, lrtlist_g, ious)
            for ind, overlap in enumerate(ious):
                self.summ_writer.summ_scalar('ap_3d/%.2f_iou' % overlap, maps_3d[ind])
                self.summ_writer.summ_scalar('ap_bev/%.2f_iou' % overlap, maps_2d[ind])

        self.summ_writer.summ_scalar('loss', total_loss.cpu().item())
        return total_loss, results, False

    def forward(self, feed):
        """Dispatch a feed to the appropriate per-set runner.

        Returns (total_loss, results, skip_flag); skip_flag True means the
        sample was rejected by prepare_common_tensors.
        """
        data_ok = self.prepare_common_tensors(feed)
        if not data_ok:
            # return early
            total_loss = torch.tensor(0.0).cuda()
            return total_loss, None, True
        else:
            if self.set_name=='train':
                return self.run_train(feed)
            else:
                # bugfix: the original printed the undefined name `set_name`,
                # which raised NameError instead of the intended message
                print('not prepared for this set_name:', self.set_name)
                assert(False)
/model_carla_ego.py
import torch
import torch.nn as nn
import hyperparams as hyp
import numpy as np
# import imageio,scipy
from model_base import Model
from nets.feat3dnet import Feat3dNet
from nets.egonet import EgoNet
import torch.nn.functional as F

import utils.vox
import utils.samp
import utils.geom
import utils.improc
import utils.basic
import utils.eval
import utils.misc

np.set_printoptions(precision=2)
np.random.seed(0)


class CARLA_EGO(Model):
    """Training harness for the ego-motion mode: builds CarlaEgoModel and
    optionally freezes the featurizer and/or ego-motion sub-nets."""

    def initialize_model(self):
        print('------ INITIALIZING MODEL OBJECTS ------')
        self.model = CarlaEgoModel()
        if hyp.do_freeze_feat3d:
            self.model.feat3dnet.eval()
            self.set_requires_grad(self.model.feat3dnet, False)
        if hyp.do_freeze_ego:
            self.model.egonet.eval()
            self.set_requires_grad(self.model.egonet, False)


class CarlaEgoModel(nn.Module):
    """Ego-motion model: a 3d featurizer (Feat3dNet) whose features feed
    EgoNet, which estimates the relative pose between two frames."""

    def __init__(self):
        super(CarlaEgoModel, self).__init__()
        if hyp.do_feat3d:
            self.feat3dnet = Feat3dNet(in_dim=4)
        if hyp.do_ego:
            self.egonet = EgoNet(
                num_scales=hyp.ego_num_scales,
                num_rots=hyp.ego_num_rots,
                max_deg=hyp.ego_max_deg,
                max_disp_z=hyp.ego_max_disp_z,
                max_disp_y=hyp.ego_max_disp_y,
                max_disp_x=hyp.ego_max_disp_x)

    def prepare_common_tensors(self, feed):
        """Unpack the feed dict into tensors shared by the per-set runners.

        Uses a fixed scene centroid (no randomization in this mode) and
        voxelizes occupancy/color inputs as attributes on self.

        Returns True (this mode has no skip conditions).
        """
        results = dict()
        self.summ_writer = utils.improc.Summ_writer(
            writer=feed['writer'],
            global_step=feed['global_step'],
            log_freq=feed['set_log_freq'],
            fps=8,
            just_gif=True)
        global_step = feed['global_step']
        self.B = feed['set_batch_size']
        self.S = feed['set_seqlen']
        self.set_name = feed['set_name']
        # pack/unpack helpers fold the seq dim into the batch dim and back
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)
        self.H, self.W, self.V, self.N = hyp.H, hyp.W, hyp.V, hyp.N
        self.PH, self.PW = hyp.PH, hyp.PW
        if self.set_name=='test':
            self.Z, self.Y, self.X = hyp.Z_test, hyp.Y_test, hyp.X_test
        elif self.set_name=='val':
            self.Z, self.Y, self.X = hyp.Z_val, hyp.Y_val, hyp.X_val
        else:
            self.Z, self.Y, self.X = hyp.Z, hyp.Y, hyp.X
        self.Z2, self.Y2, self.X2 = int(self.Z/2), int(self.Y/2), int(self.X/2)
        self.Z4, self.Y4, self.X4 = int(self.Z/4), int(self.Y/4), int(self.X/4)
        self.ZZ, self.ZY, self.ZX = hyp.ZZ, hyp.ZY, hyp.ZX
        self.pix_T_cams = feed['pix_T_cams']
        self.S = feed['set_seqlen']
        # in this mode, we never use R coords, so we can drop the R/X notation
        self.origin_T_cams = feed['origin_T_camXs']
        self.xyz_cams = feed['xyz_camXs']
        scene_centroid_x = 0.0
        scene_centroid_y = 1.0
        scene_centroid_z = 18.0
        scene_centroid = np.array([scene_centroid_x,
                                   scene_centroid_y,
                                   scene_centroid_z]).reshape([1, 3])
        self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()
        self.vox_util = utils.vox.Vox_util(
            self.Z, self.Y, self.X, self.set_name,
            scene_centroid=self.scene_centroid,
            assert_cube=True)
        self.vox_size_X = self.vox_util.default_vox_size_X
        self.vox_size_Y = self.vox_util.default_vox_size_Y
        self.vox_size_Z = self.vox_util.default_vox_size_Z
        self.rgb_cams = feed['rgb_camXs']
        # get 3d voxelized inputs
        self.occ_mems = __u(self.vox_util.voxelize_xyz(
            __p(self.xyz_cams), self.Z, self.Y, self.X))
        self.unp_mems = __u(self.vox_util.unproject_rgb_to_mem(
            __p(self.rgb_cams), self.Z, self.Y, self.X, __p(self.pix_T_cams)))
        # these are B x C x Z x Y x X
        self.summ_writer.summ_occs('3d_inputs/occ_mems', torch.unbind(self.occ_mems, dim=1))
        self.summ_writer.summ_unps('3d_inputs/unp_mems',
                                   torch.unbind(self.unp_mems, dim=1),
                                   torch.unbind(self.occ_mems, dim=1))
        return True # OK

    def run_train(self, feed):
        """One training step: featurize both frames, estimate cam0_T_cam1
        with EgoNet against the GT relative pose, and log an alignment vis.

        Returns (total_loss, results, False).
        """
        total_loss = torch.tensor(0.0).cuda()
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)
        results = dict()

        assert(hyp.do_ego)
        assert(self.S==2)

        # GT relative pose between the two frames
        origin_T_cam0 = self.origin_T_cams[:, 0]
        origin_T_cam1 = self.origin_T_cams[:, 1]
        cam0_T_cam1 = utils.basic.matmul2(
            utils.geom.safe_inverse(origin_T_cam0), origin_T_cam1)

        feat_mems_input = torch.cat([
            self.occ_mems,
            self.occ_mems*self.unp_mems,
        ], dim=2)
        feat_loss, feat_halfmems_ = self.feat3dnet(__p(feat_mems_input), self.summ_writer)
        feat_halfmems = __u(feat_halfmems_)
        total_loss += feat_loss

        ego_loss, cam0_T_cam1_e, _ = self.egonet(
            feat_halfmems[:,0],
            feat_halfmems[:,1],
            cam0_T_cam1,
            self.vox_util,
            self.summ_writer)
        total_loss += ego_loss

        # try aligning the frames, for a qualitative result
        occ_mem0_e = self.vox_util.apply_4x4_to_vox(cam0_T_cam1_e, self.occ_mems[:,1])
        self.summ_writer.summ_occs('ego/occs_aligned', [occ_mem0_e, self.occ_mems[:,0]])
        self.summ_writer.summ_occs('ego/occs_unaligned', [self.occ_mems[:,0], self.occ_mems[:,1]])

        self.summ_writer.summ_scalar('loss', total_loss.cpu().item())
        return total_loss, results, False

    def forward(self, feed):
        """Dispatch a feed to the appropriate per-set runner.

        Returns (total_loss, results, skip_flag).
        """
        data_ok = self.prepare_common_tensors(feed)
        if not data_ok:
            # return early
            total_loss = torch.tensor(0.0).cuda()
            return total_loss, None, True
        else:
            if self.set_name=='train':
                return self.run_train(feed)
            else:
                # bugfix: the original printed the undefined name `set_name`,
                # which raised NameError instead of the intended message
                print('not prepared for this set_name:', self.set_name)
                assert(False)
/model_carla_static.py
import torch
import torch.nn as nn
import hyperparams as hyp
import numpy as np
# import imageio,scipy
from model_base import Model
from nets.occnet import OccNet
from nets.feat2dnet import Feat2dNet
from nets.feat3dnet import Feat3dNet
from nets.emb2dnet import Emb2dNet
from nets.emb3dnet import Emb3dNet
from nets.viewnet import ViewNet
import torch.nn.functional as F

import utils.vox
import utils.samp
import utils.geom
import utils.improc
import utils.basic
import utils.eval
import utils.misc

np.set_printoptions(precision=2)
np.random.seed(0)


class CARLA_STATIC(Model):
    """Training harness for the static-scene mode: builds CarlaStaticModel
    and freezes whichever sub-nets the hyperparams request. The 2d/3d
    "slow" nets are always frozen — they are EMA-style anchors."""

    def initialize_model(self):
        print('------ INITIALIZING MODEL OBJECTS ------')
        self.model = CarlaStaticModel()
        if hyp.do_freeze_feat3d:
            self.model.feat3dnet.eval()
            self.set_requires_grad(self.model.feat3dnet, False)
        if hyp.do_freeze_view:
            self.model.viewnet.eval()
            self.set_requires_grad(self.model.viewnet, False)
        if hyp.do_freeze_occ:
            self.model.occnet.eval()
            self.set_requires_grad(self.model.occnet, False)
        if hyp.do_freeze_emb2d:
            self.model.emb2dnet.eval()
            self.set_requires_grad(self.model.emb2dnet, False)
        if hyp.do_emb2d:
            # freeze the slow model
            self.model.feat2dnet_slow.eval()
            self.set_requires_grad(self.model.feat2dnet_slow, False)
        if hyp.do_emb3d:
            # freeze the slow model
            self.model.feat3dnet_slow.eval()
            self.set_requires_grad(self.model.feat3dnet_slow, False)


class CarlaStaticModel(nn.Module):
    """Static-scene representation model: 2d/3d featurizers plus occupancy,
    view-prediction, and 2d/3d metric-learning (emb) heads, each enabled by
    its hyperparams flag."""

    def __init__(self):
        super(CarlaStaticModel, self).__init__()
        if hyp.do_occ:
            self.occnet = OccNet()
        if hyp.do_view:
            self.viewnet = ViewNet()
        if hyp.do_feat2d:
            self.feat2dnet = Feat2dNet()
        if hyp.do_emb2d:
            self.emb2dnet = Emb2dNet()
            # make a slow net
            self.feat2dnet_slow = Feat2dNet(in_dim=3)
        if hyp.do_feat3d:
            self.feat3dnet = Feat3dNet(in_dim=4)
        if hyp.do_emb3d:
            self.emb3dnet = Emb3dNet()
            # make a slow net
            self.feat3dnet_slow = Feat3dNet(in_dim=4)

    def prepare_common_tensors(self, feed):
        """Unpack the feed dict into tensors shared by the per-set runners.

        Sets up the summary writer, camera transforms, a (train-time
        randomized) voxel grid, projected depth/valid maps, and voxelized
        occupancy/color inputs as attributes on self.

        Returns True when the sample is usable, False when no good scene
        centroid could be sampled.
        """
        results = dict()
        self.summ_writer = utils.improc.Summ_writer(
            writer=feed['writer'],
            global_step=feed['global_step'],
            log_freq=feed['set_log_freq'],
            fps=8,
            just_gif=True)
        global_step = feed['global_step']
        self.B = feed['set_batch_size']
        self.S = feed['set_seqlen']
        self.set_name = feed['set_name']
        # pack/unpack helpers fold the seq dim into the batch dim and back
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)
        self.H, self.W, self.V, self.N = hyp.H, hyp.W, hyp.V, hyp.N
        self.PH, self.PW = hyp.PH, hyp.PW
        # if self.set_name=='test':
        #     self.Z, self.Y, self.X = hyp.Z_test, hyp.Y_test, hyp.X_test
        # elif self.set_name=='val':
        #     self.Z, self.Y, self.X = hyp.Z_val, hyp.Y_val, hyp.X_val
        # else:
        self.Z, self.Y, self.X = hyp.Z, hyp.Y, hyp.X
        self.Z2, self.Y2, self.X2 = int(self.Z/2), int(self.Y/2), int(self.X/2)
        self.Z4, self.Y4, self.X4 = int(self.Z/4), int(self.Y/4), int(self.X/4)
        self.ZZ, self.ZY, self.ZX = hyp.ZZ, hyp.ZY, hyp.ZX
        self.pix_T_cams = feed['pix_T_cams']
        set_data_format = feed['set_data_format']
        self.S = feed['set_seqlen']
        self.origin_T_camRs = feed['origin_T_camRs']
        self.origin_T_camXs = feed['origin_T_camXs']
        self.camX0s_T_camXs = utils.geom.get_camM_T_camXs(self.origin_T_camXs, ind=0)
        self.camR0s_T_camRs = utils.geom.get_camM_T_camXs(self.origin_T_camRs, ind=0)
        self.camRs_T_camR0s = __u(utils.geom.safe_inverse(__p(self.camR0s_T_camRs)))
        self.camRs_T_camXs = __u(torch.matmul(__p(self.origin_T_camRs).inverse(), __p(self.origin_T_camXs)))
        self.camXs_T_camRs = __u(__p(self.camRs_T_camXs).inverse())
        self.xyz_camXs = feed['xyz_camXs']
        self.xyz_camRs = __u(utils.geom.apply_4x4(__p(self.camRs_T_camXs), __p(self.xyz_camXs)))
        self.xyz_camX0s = __u(utils.geom.apply_4x4(__p(self.camX0s_T_camXs), __p(self.xyz_camXs)))
        if self.set_name=='test' or self.set_name=='val':
            # fixed centroid
            scene_centroid_x = 0.0
            scene_centroid_y = 1.0
            scene_centroid_z = 18.0
            # NOTE(review): unlike the det variant of this method, this branch
            # only sets the three scalars and never builds self.scene_centroid
            # or self.vox_util, which are read unconditionally below — this
            # path looks stale/broken for test/val. Confirm against the
            # original file before relying on it; preserved as-is here.
        else:
            # randomize a bit, as a form of data aug
            all_ok = False
            num_tries = 0
            while (not all_ok) and (num_tries < 100):
                scene_centroid_x = np.random.uniform(-8.0, 8.0)
                scene_centroid_y = np.random.uniform(-1.5, 3.0)
                scene_centroid_z = np.random.uniform(10.0, 26.0)
                scene_centroid = np.array([scene_centroid_x,
                                           scene_centroid_y,
                                           scene_centroid_z]).reshape([1, 3])
                self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()
                num_tries += 1
                all_ok = True
                self.vox_util = utils.vox.Vox_util(
                    self.Z, self.Y, self.X, self.set_name,
                    scene_centroid=self.scene_centroid,
                    assert_cube=True)
                # we want to ensure this gives us a few points inbound for each element
                inb = __u(self.vox_util.get_inbounds(
                    __p(self.xyz_camX0s), self.Z, self.Y, self.X, already_mem=False))
                # this is B x S x N
                num_inb = torch.sum(inb.float(), axis=2)
                # this is B x S
                if torch.min(num_inb) < 300:
                    all_ok = False
            self.summ_writer.summ_scalar('centroid_sampling/num_tries', float(num_tries))
            self.summ_writer.summ_scalar('centroid_sampling/num_inb', torch.mean(num_inb).cpu().item())
            if num_tries >= 100:
                return False
        self.vox_size_X = self.vox_util.default_vox_size_X
        self.vox_size_Y = self.vox_util.default_vox_size_Y
        self.vox_size_Z = self.vox_util.default_vox_size_Z
        origin_T_camRs_ = self.origin_T_camRs.reshape(
            self.B, self.S, 1, 4, 4).repeat(1, 1, self.N, 1, 1).reshape(
            self.B*self.S, self.N, 4, 4)
        boxlists = feed['boxlists']
        self.rgb_camXs = feed['rgb_camXs']
        ## get the projected depthmap and inbound mask
        self.depth_camXs_, self.valid_camXs_ = utils.geom.create_depth_image(
            __p(self.pix_T_cams), __p(self.xyz_camXs), self.H, self.W)
        self.dense_xyz_camXs_ = utils.geom.depth2pointcloud(
            self.depth_camXs_, __p(self.pix_T_cams))
        # we need to go to X0 to see what will be inbounds
        self.dense_xyz_camX0s_ = utils.geom.apply_4x4(
            __p(self.camX0s_T_camXs), self.dense_xyz_camXs_)
        self.inbound_camXs_ = self.vox_util.get_inbounds(
            self.dense_xyz_camX0s_, self.Z, self.Y, self.X).float()
        self.inbound_camXs_ = torch.reshape(
            self.inbound_camXs_, [self.B*self.S, 1, self.H, self.W])
        self.depth_camXs = __u(self.depth_camXs_)
        self.valid_camXs = __u(self.valid_camXs_) * __u(self.inbound_camXs_)
        self.summ_writer.summ_oned('2d_inputs/depth_camX0', self.depth_camXs[:,0], maxval=32.0)
        self.summ_writer.summ_oned('2d_inputs/valid_camX0', self.valid_camXs[:,0], norm=False)
        self.summ_writer.summ_rgb('2d_inputs/rgb_camX0', self.rgb_camXs[:,0])
        # get 3d voxelized inputs
        self.occ_memXs = __u(self.vox_util.voxelize_xyz(
            __p(self.xyz_camXs), self.Z, self.Y, self.X))
        self.unp_memXs = __u(self.vox_util.unproject_rgb_to_mem(
            __p(self.rgb_camXs), self.Z, self.Y, self.X, __p(self.pix_T_cams)))
        # these are B x C x Z x Y x X
        self.summ_writer.summ_occs('3d_inputs/occ_memXs', torch.unbind(self.occ_memXs, dim=1))
        self.summ_writer.summ_unps('3d_inputs/unp_memXs',
                                   torch.unbind(self.unp_memXs, dim=1),
                                   torch.unbind(self.occ_memXs, dim=1))
        return True # OK

    def run_train(self, feed):
        """One training step: run whichever heads are enabled and sum losses.

        Returns (total_loss, results, False).
        """
        results = dict()
        global_step = feed['global_step']
        total_loss = torch.tensor(0.0).cuda()
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)

        #####################
        ## run the nets
        #####################

        if hyp.do_feat2d:
            feat2d_loss, feat_camX0 = self.feat2dnet(
                self.rgb_camXs[:,0],
                self.summ_writer,
            )
            if hyp.do_emb2d:
                # for stability, we will also use a slow net here
                _, altfeat_camX0 = self.feat2dnet_slow(self.rgb_camXs[:,0])

        if hyp.do_feat3d:
            # start with a 4-channel feature map;
            feat_memXs_input = torch.cat([
                self.occ_memXs,
                self.unp_memXs*self.occ_memXs,
            ], dim=2)
            # featurize views 1..S-1; view 0 is reserved for the slow net
            feat3d_loss, feat_memXs_ = self.feat3dnet(
                __p(feat_memXs_input[:,1:]), self.summ_writer)
            feat_memXs = __u(feat_memXs_)
            total_loss += feat3d_loss
            valid_memXs = torch.ones_like(feat_memXs[:,:,0:1])
            # warp per-view features into the shared R frame and aggregate
            feat_memRs = self.vox_util.apply_4x4s_to_voxs(self.camRs_T_camXs[:,1:], feat_memXs)
            valid_memRs = self.vox_util.apply_4x4s_to_voxs(self.camRs_T_camXs[:,1:], valid_memXs)
            # these are B x S x C x Z2 x Y2 x X2
            feat_memR = utils.basic.reduce_masked_mean(
                feat_memRs, valid_memRs, dim=1)
            valid_memR = torch.max(valid_memRs, dim=1)[0]
            # these are B x C x Z2 x Y2 x X2
            self.summ_writer.summ_feat('feat3d/feat_output_agg', feat_memR, valid_memR, pca=True)
            if hyp.do_emb3d:
                _, altfeat_memR = self.feat3dnet_slow(feat_memXs_input[:,0])
                altvalid_memR = torch.ones_like(altfeat_memR[:,0:1])
                self.summ_writer.summ_feat('feat3d/altfeat_input', feat_memXs_input[:,0], pca=True)
                self.summ_writer.summ_feat('feat3d/altfeat_output', altfeat_memR, pca=True)

        if hyp.do_occ:
            assert(hyp.do_feat3d)
            occ_memR_sup, free_memR_sup, _, _ = self.vox_util.prep_occs_supervision(
                self.camRs_T_camXs,
                self.xyz_camXs,
                self.Z2, self.Y2, self.X2,
                agg=True)
            occ_loss, occ_memR_pred = self.occnet(
                feat_memR,
                occ_memR_sup,
                free_memR_sup,
                valid_memR,
                self.summ_writer)
            total_loss += occ_loss

        if hyp.do_view:
            assert(hyp.do_feat3d)
            # decode the perspective volume into an image
            view_loss, rgb_camX0_e, viewfeat_camX0 = self.viewnet(
                self.pix_T_cams[:,0],
                self.camXs_T_camRs[:,0],
                feat_memR,
                self.rgb_camXs[:,0],
                self.vox_util,
                valid=self.valid_camXs[:,0],
                summ_writer=self.summ_writer)
            total_loss += view_loss

        if hyp.do_emb2d:
            assert(hyp.do_feat2d)
            if hyp.do_view:
                # anchor against the bottom-up 2d net
                valid_camX0 = F.interpolate(self.valid_camXs[:,0], scale_factor=0.5, mode='nearest')
                emb2d_loss, _ = self.emb2dnet(
                    viewfeat_camX0,
                    feat_camX0,
                    valid_camX0,
                    summ_writer=self.summ_writer,
                    suffix='_view')
                total_loss += emb2d_loss
            # anchor against the slow net
            emb2d_loss, _ = self.emb2dnet(
                feat_camX0,
                altfeat_camX0,
                torch.ones_like(feat_camX0[:,0:1]),
                summ_writer=self.summ_writer,
                suffix='_slow')
            total_loss += emb2d_loss

        if hyp.do_emb3d:
            assert(hyp.do_feat3d)
            # compute 3D ML
            emb3d_loss = self.emb3dnet(
                feat_memR,
                altfeat_memR,
                valid_memR.round(),
                altvalid_memR.round(),
                self.summ_writer)
            total_loss += emb3d_loss

        self.summ_writer.summ_scalar('loss', total_loss.cpu().item())
        return total_loss, results, False

    def run_test(self, feed):
        """Test step: visualize GT boxes/masks and measure semantic-retrieval
        precision of the 3d features inside vs outside object masks.

        Returns (total_loss, None, skip_flag).
        """
        results = dict()
        global_step = feed['global_step']
        total_loss = torch.tensor(0.0).cuda()
        # total_loss = torch.autograd.Variable(0.0, requires_grad=True).cuda()
        __p = lambda x: utils.basic.pack_seqdim(x, self.B)
        __u = lambda x: utils.basic.unpack_seqdim(x, self.B)

        # get the boxes
        boxlist_camRs = feed['boxlists']
        tidlist_s = feed['tidlists'] # coordinate-less and plural
        scorelist_s = feed['scorelists'] # coordinate-less and plural
        lrtlist_camRs = __u(utils.geom.convert_boxlist_to_lrtlist(
            __p(boxlist_camRs))).reshape(self.B, self.S, self.N, 19)
        lrtlist_camXs = __u(utils.geom.apply_4x4_to_lrtlist(
            __p(self.camXs_T_camRs), __p(lrtlist_camRs)))
        # these are is B x S x N x 19
        self.summ_writer.summ_lrtlist('obj/lrtlist_camX0', self.rgb_camXs[:,0],
                                      lrtlist_camXs[:,0], scorelist_s[:,0],
                                      tidlist_s[:,0], self.pix_T_cams[:,0])
        # NOTE(review): self.rgb_camRs is never assigned by
        # prepare_common_tensors in this chunk — this line looks like it
        # would raise AttributeError; likely a stale test path. Confirm.
        self.summ_writer.summ_lrtlist('obj/lrtlist_camR0', self.rgb_camRs[:,0],
                                      lrtlist_camRs[:,0], scorelist_s[:,0],
                                      tidlist_s[:,0], self.pix_T_cams[:,0])
        # mask_memX0 = utils.vox.assemble_padded_obj_masklist(
        #     lrtlist_camXs[:,0], scorelist_s[:,0], self.Z2, self.Y2, self.X2, coeff=1.0)
        # mask_memX0 = torch.sum(mask_memX0, dim=1).clamp(0, 1)
        # self.summ_writer.summ_oned('obj/mask_memX0', mask_memX0, bev=True)
        mask_memXs = __u(utils.vox.assemble_padded_obj_masklist(
            __p(lrtlist_camXs), __p(scorelist_s), self.Z2, self.Y2, self.X2, coeff=1.0))
        mask_memXs = torch.sum(mask_memXs, dim=2).clamp(0, 1)
        self.summ_writer.summ_oneds('obj/mask_memXs', torch.unbind(mask_memXs, dim=1), bev=True)
        for b in list(range(self.B)):
            for s in list(range(self.S)):
                mask = mask_memXs[b,s]
                if torch.sum(mask) < 2.0:
                    # return early
                    return total_loss, None, True
        # next: i want to treat features differently if they are in obj masks vs not
        # in particular, i want a different kind of retrieval metric
        if hyp.do_feat3d:
            # occXs is B x S x 1 x H x W x D
            # unpXs is B x S x 3 x H x W x D
            # NOTE(review): self.occXs/self.unpXs are never assigned by
            # prepare_common_tensors (it sets occ_memXs/unp_memXs), and this
            # feat3dnet call signature (comp_mask kwarg, 3 return values)
            # differs from run_train's — this branch appears stale. Confirm
            # against the original repo before use.
            feat_memXs_input = torch.cat([self.occXs, self.occXs*self.unpXs], dim=2)
            feat_memXs_input_ = __p(feat_memXs_input)
            feat_memXs_, _, _ = self.feat3dnet(
                feat_memXs_input_,
                self.summ_writer,
                comp_mask=None,
            )
            feat_memXs = __u(feat_memXs_)
            self.summ_writer.summ_feats('3d_feats/feat_memXs_input',
                                        torch.unbind(feat_memXs_input, dim=1), pca=True)
            self.summ_writer.summ_feats('3d_feats/feat_memXs_output',
                                        torch.unbind(feat_memXs, dim=1), pca=True)
            mv_precision = utils.eval.measure_semantic_retrieval_precision(
                feat_memXs[0], mask_memXs[0])
            self.summ_writer.summ_scalar('semantic_retrieval/multiview_precision', mv_precision)
            ms_precision = utils.eval.measure_semantic_retrieval_precision(
                feat_memXs[:,0], mask_memXs[:,0])
            self.summ_writer.summ_scalar('semantic_retrieval/multiscene_precision', ms_precision)
        return total_loss, None, False

    def forward(self, feed):
        """Dispatch a feed to the appropriate per-set runner.

        Returns (total_loss, results, skip_flag).
        """
        data_ok = self.prepare_common_tensors(feed)
        if not data_ok:
            # return early
            total_loss = torch.tensor(0.0).cuda()
            return total_loss, None, True
        else:
            if self.set_name=='train':
                return self.run_train(feed)
            elif self.set_name=='test':
                return self.run_test(feed)
            else:
                # bugfix: the original printed the undefined name `set_name`,
                # which raised NameError instead of the intended message
                print('weird set_name:', self.set_name)
                assert(False)
/nets/detnet.py
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.ops as ops
import utils.basic
import utils.geom
import utils.misc
import hyperparams as hyp
import archs.encoder3d


def smooth_l1_loss(deltas, targets, sigma=3.0):
    """Elementwise smooth-L1 (Huber-style) loss between deltas and targets.

    Quadratic for |diff| < 1/sigma^2, linear beyond; returns a tensor the
    same shape as the inputs (no reduction).
    """
    sigma2 = sigma * sigma
    diffs = deltas - targets
    smooth_l1_signs = (torch.abs(diffs) < 1.0 / sigma2).float()
    smooth_l1_option1 = diffs**2 * 0.5 * sigma2
    smooth_l1_option2 = torch.abs(diffs) - 0.5 / sigma2
    smooth_l1_add = smooth_l1_option1 * smooth_l1_signs + smooth_l1_option2 * (1 - smooth_l1_signs)
    smooth_l1 = smooth_l1_add
    return smooth_l1


def binarize(input, threshold):
    # NOTE: `input` shadows the builtin; kept for interface compatibility.
    """Return 1.0 where input >= threshold, else 0.0 (same shape/dtype)."""
    return torch.where(input < threshold, torch.zeros_like(input), torch.ones_like(input))


def meshgrid3d_xyz(B, Z, Y, X):
    """Build a B x X x Y x Z x 3 grid of (x, y, z) voxel coordinates."""
    grid_z, grid_y, grid_x = utils.basic.meshgrid3d(B, Z, Y, X, stack=False)
    # each one is shaped B x Z x Y x X; permute to axis order X x Y x Z
    grid_z = grid_z.permute(0, 3, 2, 1)
    grid_x = grid_x.permute(0, 3, 2, 1)
    grid_y = grid_y.permute(0, 3, 2, 1)
    grid = torch.stack([grid_x, grid_y, grid_z], dim=-1)
    return grid


def anchor_deltas_to_bboxes(anchor_deltas, indices):
    """Decode anchor deltas into boxes.

    Args:
        anchor_deltas: num_objects x 6; first 3 are translation (in units of
            hyp.det_anchor_size), last 3 are log-scale.
        indices: num_objects x 3 integer grid locations (the anchor centers).

    Returns:
        (boxes, boxes_co): N x 3 x 2 min/max boxes, and N x 6
        center+size parameterization.
    """
    grid_center = indices.float()
    object_center = grid_center + anchor_deltas[:, :3] * hyp.det_anchor_size
    object_min = object_center - 0.5 * torch.exp(anchor_deltas[:, 3:]) * hyp.det_anchor_size
    object_max = object_center + 0.5 * torch.exp(anchor_deltas[:, 3:]) * hyp.det_anchor_size
    return torch.stack([object_min, object_max], 2), torch.cat([object_center, object_max - object_min], 1)


def overlap_graph(boxes1, boxes2):
    """Pairwise 3D IoU between two sets of axis-aligned boxes.

    Args:
        boxes1: N1 x 3 x 2 (per-axis min/max, i.e. z1,z2,y1,y2,x1,x2 flattened).
        boxes2: N2 x 3 x 2.

    Returns:
        N1 x N2 IoU matrix. Degenerate (zero-volume) pairs divide by a zero
        union and yield nan/inf; callers are expected to pass real boxes.
    """
    b1_bs = boxes1.shape[0]
    b2_bs = boxes2.shape[0]
    if b1_bs == 0 or b2_bs == 0:
        # torch's repeat will fail on an empty dim, so return early
        return torch.zeros(b1_bs, b2_bs)
    boxes1 = boxes1.view(-1, 6)
    boxes2 = boxes2.view(-1, 6)
    # tile so every pair is compared: both become (N1*N2) x 6
    b1 = boxes1.unsqueeze(1).repeat(1, b2_bs, 1).view(-1, 6)
    b2 = boxes2.unsqueeze(0).repeat(b1_bs, 1, 1).view(-1, 6)
    b1_z1, b1_z2, b1_y1, b1_y2, b1_x1, b1_x2 = torch.chunk(b1, 6, dim=1)
    b2_z1, b2_z2, b2_y1, b2_y2, b2_x1, b2_x2 = torch.chunk(b2, 6, dim=1)
    z1 = torch.max(b1_z1, b2_z1)
    z2 = torch.min(b1_z2, b2_z2)
    y1 = torch.max(b1_y1, b2_y1)
    y2 = torch.min(b1_y2, b2_y2)
    x1 = torch.max(b1_x1, b2_x1)
    x2 = torch.min(b1_x2, b2_x2)
    intersection = torch.max(z2 - z1, torch.zeros_like(z1)) * torch.max(y2 - y1, torch.zeros_like(y1)) * torch.max(x2 - x1, torch.zeros_like(x1))
    b1_area = (b1_z2 - b1_z1) * (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    b2_area = (b2_z2 - b2_z1) * (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = b1_area + b2_area - intersection
    iou = intersection / union
    overlaps = iou.view(b1_bs, b2_bs)
    return overlaps


def box_refinement_graph(positive_rois, roi_gt_boxes):
    """Center/length deltas from predicted ROIs to their GT boxes.

    Both inputs are N x 3 x 2 (per-axis min/max); returns N x 6
    (delta_center_zyx, delta_len_zyx).
    """
    gt_center = torch.mean(roi_gt_boxes, dim=2)
    pd_center = torch.mean(positive_rois, dim=2)
    # these are N x 3 (zyx order)
    delta_zyx = gt_center-pd_center
    len_gt = roi_gt_boxes[:,:,1] - roi_gt_boxes[:,:,0]
    len_pd = positive_rois[:,:,1] - positive_rois[:,:,0]
    delta_len = len_gt - len_pd
    return torch.cat([delta_zyx, delta_len], dim=1)


def rpn_proposal_graph(pred_objectness, pred_anchor_deltas, valid_mask, corners_min_max_g, iou_thresh=0.5):
    """Turn dense objectness/delta maps into per-batch NMS'd proposals.

    Args:
        pred_objectness: B x X x Y x Z objectness probabilities.
        pred_anchor_deltas: B x X x Y x Z x 6.
        valid_mask: B x N validity of each GT box.
        corners_min_max_g: B x N x 3 x 2 GT boxes.
        iou_thresh: NMS IoU threshold.

    Returns:
        Three length-B lists (boxes_co, scores, overlaps-vs-GT), or
        (None, None, None) when no location exceeds the probability threshold.
    """
    P_THRES = 0.9
    high_prob_indices = torch.stack(torch.where(pred_objectness > P_THRES), dim=1)
    # high_prob_indices is ? x 4, last dim in bxyz order
    B = pred_objectness.shape[0]
    bs_selected_boxes_co = []
    bs_selected_scores = []
    bs_overlaps = []
    # bug fix: the original tested `len(high_prob_indices > 0)`, i.e. the
    # length of a boolean tensor; the intended condition is on the count of
    # above-threshold locations.
    if len(high_prob_indices) > 0:
        for i in list(range(B)):
            selected_boxes, selected_boxes_scores, overlaps, selected_boxes_co = detection_target_graph(
                i, high_prob_indices, corners_min_max_g, valid_mask,
                pred_objectness, pred_anchor_deltas, iou_thresh=iou_thresh)
            bs_selected_boxes_co.append(selected_boxes_co)
            bs_selected_scores.append(selected_boxes_scores)
            bs_overlaps.append(overlaps)
        return bs_selected_boxes_co, bs_selected_scores, bs_overlaps
    else:
        return None, None, None


def detection_target_graph(i, high_prob_indices, corners_min_max_g, valid_mask,
                           pred_objectness, pred_anchor_deltas, iou_thresh=0.5):
    """Decode, NMS, and score proposals for batch element i.

    NMS is approximated in 3D by running torchvision's 2D NMS on two
    orthogonal projections (xy and zx) and keeping the union of survivors.

    Returns:
        (boxes N x 3 x 2, scores N, overlaps N x valid_gt, boxes_co N x 6).
    """
    batch_i_idxs = torch.stack(torch.where(high_prob_indices[:,0] == i), dim=1)  # (?, 1)
    batch_i_indices = high_prob_indices[batch_i_idxs.squeeze(dim=1)]  # ? x 4
    batch_i_scores = pred_objectness[batch_i_indices[:, 0], batch_i_indices[:, 1],
                                     batch_i_indices[:, 2], batch_i_indices[:, 3]]  # (?,)
    batch_i_anchor_deltas = pred_anchor_deltas[batch_i_indices[:, 0], batch_i_indices[:, 1],
                                               batch_i_indices[:, 2], batch_i_indices[:, 3]]  # (?, 6)
    # "co" refers to the center+offset parameterization
    batch_i_bboxes, batch_i_bboxes_co = anchor_deltas_to_bboxes(
        batch_i_anchor_deltas, batch_i_indices[:,1:])
    # NMS on the xy projection; view() fails on the permuted tensor, hence contiguous()
    selected_bboxes_idx_xy = ops.nms(
        batch_i_bboxes[:, 1:, :].permute(0, 2, 1).contiguous().view(-1, 4).cpu(),
        batch_i_scores.cpu(), iou_thresh).cuda()
    # NMS on the zx projection
    selected_bboxes_idx_zx = ops.nms(
        batch_i_bboxes[:, [0,2], :].permute(0, 2, 1).contiguous().view(-1, 4).cpu(),
        batch_i_scores.cpu(), iou_thresh).cuda()
    selected_bboxes_idx = torch.unique(torch.cat([selected_bboxes_idx_xy, selected_bboxes_idx_zx], dim=0))
    selected_3d_bboxes = batch_i_bboxes[selected_bboxes_idx]  # (sel, 3, 2)
    selected_3d_bboxes_co = batch_i_bboxes_co[selected_bboxes_idx]  # (sel, 6)
    selected_3d_bboxes_scores = batch_i_scores[selected_bboxes_idx]
    valid_inds = torch.stack(torch.where(valid_mask[i, :]), dim=1).squeeze(dim=1)
    corners_min_max_g_i = corners_min_max_g[i, valid_inds]  # (valid, 3, 2)
    # 3D IoU of survivors against the valid GT boxes
    overlaps = overlap_graph(selected_3d_bboxes, corners_min_max_g_i)
    return selected_3d_bboxes, selected_3d_bboxes_scores, overlaps, selected_3d_bboxes_co


class DetNet(nn.Module):
    """Single-conv 3D detection head: 1 objectness logit + 6 anchor deltas per voxel."""

    def __init__(self):
        print('DetNet...')
        super(DetNet, self).__init__()
        self.pred_dim = 7  # 6 deltas + 1 objectness
        self.net = torch.nn.Conv3d(in_channels=hyp.feat3d_dim, out_channels=self.pred_dim,
                                   kernel_size=3, stride=1, padding=1).cuda()
        print(self.net)

    def forward(self, lrtlist_g, scores_g, feat_zyx, summ_writer):
        """Compute detection losses and NMS'd box estimates.

        Args:
            lrtlist_g: B x N x 19 GT boxes.
            scores_g: B x N GT validity scores.
            feat_zyx: B x C x Z x Y x X feature volume.
            summ_writer: tensorboard summary helper.

        Returns:
            (total_loss, padded_boxes_e B x K*2 x 9, padded_scores_e B x K*2,
             tidlist B x K*2, pred_objectness B x X x Y x Z,
             bs_selected_scores, bs_overlaps).
        """
        total_loss = torch.tensor(0.0).cuda()
        B, C, Z, Y, X = feat_zyx.shape
        _, N, _ = lrtlist_g.shape
        pred_dim = self.pred_dim
        feat = feat_zyx.permute(0, 1, 4, 3, 2)  # xyz order: B x C x X x Y x Z

        corners = utils.geom.get_xyzlist_from_lrtlist(lrtlist_g)  # B x N x 8 x 3, xyz order
        corners_max = torch.max(corners, dim=2)[0]  # B x N x 3
        corners_min = torch.min(corners, dim=2)[0]
        corners_min_max_g = torch.stack([corners_min, corners_max], dim=3)  # B x N x 3 x 2

        # trim down, to save some time
        # NOTE(review): lrtlist_g itself is not trimmed, so the later view(B, N, ...)
        # calls assume the incoming N never exceeds hyp.K — confirm with callers.
        N = min(N, hyp.K)
        corners_min_max_g = corners_min_max_g[:,:N]
        scores_g = scores_g[:, :N]  # B x N

        centers_g = utils.geom.get_clist_from_lrtlist(lrtlist_g)  # B x N x 3
        grid = meshgrid3d_xyz(B, Z, Y, X)[0]  # one grid: X x Y x Z x 3
        delta_positions_raw = centers_g.view(B, N, 1, 1, 1, 3) - grid.view(1, 1, X, Y, Z, 3)
        delta_positions = delta_positions_raw / hyp.det_anchor_size
        lengths_g = utils.geom.get_lenlist_from_lrtlist(lrtlist_g)  # B x N x 3
        delta_lengths = torch.log(lengths_g / hyp.det_anchor_size)
        # clamp to avoid -infs (from zero lengths) turning into nans downstream
        delta_lengths = torch.max(delta_lengths, -1e6 * torch.ones_like(delta_lengths))
        lengths_g = lengths_g.view(B, N, 1, 1, 1, 3).repeat(1, 1, X, Y, Z, 1)
        delta_lengths = delta_lengths.view(B, N, 1, 1, 1, 3).repeat(1, 1, X, Y, Z, 1)
        valid_mask = scores_g.view(B, N, 1, 1, 1, 1).repeat(1, 1, X, Y, Z, 1)
        delta_gt = torch.cat([delta_positions, delta_lengths], -1)  # B x N x X x Y x Z x 6

        # normalized distance of each voxel to each object center
        object_dist = torch.max(torch.abs(delta_positions_raw)/(lengths_g * 0.5 + 1e-5), dim=5)[0]
        object_dist_mask = (torch.ones_like(object_dist) - binarize(object_dist, 0.5)).unsqueeze(dim=5)
        object_dist_mask = object_dist_mask * valid_mask  # positives: well inside a box
        object_neg_dist_mask = torch.ones_like(object_dist) - binarize(object_dist, 0.8)
        object_neg_dist_mask = object_neg_dist_mask * valid_mask.squeeze(dim=5)  # near-misses excluded from negatives

        # paint per-object deltas into one volume; first object wins on overlaps
        anchor_deltas_gt = None
        for obj_id in list(range(N)):
            if anchor_deltas_gt is None:
                anchor_deltas_gt = delta_gt[:, obj_id, :, :, :, :] * object_dist_mask[:, obj_id, :, :, :, :]
                current_mask = object_dist_mask[:, obj_id, :, :, :, :]
            else:
                # don't overwrite anchor positions that are already taken
                overlap = current_mask * object_dist_mask[:, obj_id, :, :, :, :]
                anchor_deltas_gt += (torch.ones_like(overlap)- overlap) * delta_gt[:, obj_id, :, :, :, :] * object_dist_mask[:, obj_id, :, :, :, :]
                current_mask = current_mask + object_dist_mask[:, obj_id, :, :, :, :]
                current_mask = binarize(current_mask, 0.5)

        pos_equal_one = binarize(torch.sum(object_dist_mask, dim=1), 0.5).squeeze(dim=4)  # B x X x Y x Z
        neg_equal_one = binarize(torch.sum(object_neg_dist_mask, dim=1), 0.5)
        neg_equal_one = torch.ones_like(neg_equal_one) - neg_equal_one  # B x X x Y x Z
        pos_equal_one_sum = torch.sum(pos_equal_one, [1,2,3])  # B
        neg_equal_one_sum = torch.sum(neg_equal_one, [1,2,3])
        summ_writer.summ_occ('det/pos_equal_one', pos_equal_one.unsqueeze(1))
        # set min to one in case no object, to avoid nan
        pos_equal_one_sum_safe = torch.max(pos_equal_one_sum, torch.ones_like(pos_equal_one_sum))
        neg_equal_one_sum_safe = torch.max(neg_equal_one_sum, torch.ones_like(neg_equal_one_sum))

        pred = self.net(feat)  # B x 7 x X x Y x Z
        summ_writer.summ_feat('det/feat', feat, pca=False)
        summ_writer.summ_feat('det/pred', pred, pca=True)
        pred = pred.permute(0, 2, 3, 4, 1)  # B x X x Y x Z x 7
        pred_anchor_deltas = pred[..., 1:]  # B x X x Y x Z x 6
        pred_objectness_logits = pred[..., 0]  # B x X x Y x Z
        # fix: torch.nn.functional.sigmoid is deprecated; torch.sigmoid is identical
        pred_objectness = torch.sigmoid(pred_objectness_logits)

        alpha = 1.5  # positive-class weight
        beta = 1.0   # negative-class weight
        small_addon_for_BCE = 1e-6
        overall_loss = torch.nn.functional.binary_cross_entropy_with_logits(
            input=pred_objectness_logits,
            target=pos_equal_one,
            reduction='none',
        )
        cls_pos_loss = utils.basic.reduce_masked_mean(overall_loss, pos_equal_one)
        cls_neg_loss = utils.basic.reduce_masked_mean(overall_loss, neg_equal_one)
        loss_prob = torch.sum(alpha * cls_pos_loss + beta * cls_neg_loss)

        pos_mask = pos_equal_one.unsqueeze(dim=4)  # B x X x Y x Z x 1
        loss_l1 = smooth_l1_loss(pos_mask * pred_anchor_deltas, pos_mask * anchor_deltas_gt)
        loss_reg = torch.sum(loss_l1/pos_equal_one_sum_safe.view(-1, 1, 1, 1, 1))/float(B)

        total_loss = utils.misc.add_loss('det/detect_prob', total_loss, loss_prob, hyp.det_prob_coeff, summ_writer)
        total_loss = utils.misc.add_loss('det/detect_reg', total_loss, loss_reg, hyp.det_reg_coeff, summ_writer)

        # finally, turn the preds into hard boxes, with nms
        (bs_selected_boxes_co,
         bs_selected_scores,
         bs_overlaps,
        ) = rpn_proposal_graph(pred_objectness, pred_anchor_deltas,
                               scores_g, corners_min_max_g, iou_thresh=0.2)
        # these are lists of length B, each leading with dim "?", since the
        # number of surviving proposals varies per frame

        N = hyp.K*2
        tidlist = torch.linspace(1.0, N, N).long().to('cuda')
        tidlist = tidlist.unsqueeze(0).repeat(B, 1)
        padded_boxes_e = torch.zeros(B, N, 9).float().cuda()
        padded_scores_e = torch.zeros(B, N).float().cuda()
        if bs_selected_boxes_co is not None:
            for b in list(range(B)):
                # make the boxes 1 x N x 9 (instead of B x ? x 6)
                padded_boxes0_e = bs_selected_boxes_co[b].unsqueeze(0)
                padded_scores0_e = bs_selected_scores[b].unsqueeze(0)
                # pad out, then clip to exactly N
                padded_boxes0_e = torch.cat([padded_boxes0_e, torch.zeros([1, N, 6], device=torch.device('cuda'))], dim=1)
                padded_scores0_e = torch.cat([padded_scores0_e, torch.zeros([1, N], device=torch.device('cuda'))], dim=1)
                padded_boxes0_e = padded_boxes0_e[:,:N]
                padded_scores0_e = padded_scores0_e[:,:N]
                # append zero rotation to reach the 9-dof box format
                padded_boxes0_e = torch.cat([padded_boxes0_e, torch.zeros([1, N, 3], device=torch.device('cuda'))], dim=2)
                padded_boxes_e[b] = padded_boxes0_e[0]
                padded_scores_e[b] = padded_scores0_e[0]
        return total_loss, padded_boxes_e, padded_scores_e, tidlist, pred_objectness, bs_selected_scores, bs_overlaps


if __name__ == "__main__":
    # quick smoke test of the proposal path on synthetic inputs
    A = torch.randn(5, 10)
    B = torch.randn(5, 10)
    boxes1 = torch.randn(2, 3, 1)
    boxes1 = boxes1.repeat(1, 1, 2)  # 2 x 3 x 2
    boxes1[:, :, 1] += 1.0
    boxes2 = boxes1 - 0.5
    pred_objectness = torch.zeros(2, 10, 10, 10)
    pred_objectness[0,1,1,1] = 1.0
    pred_anchor_deltas = torch.zeros(2, 10, 10, 10, 6)
    valid_mask = torch.ones(2, 1)
    corners_min_max_g = torch.tensor(np.array([[0.0, 1.5], [0.0, 1.5], [0.5, 1.5]])).view(1, 1, 3, 2).repeat(2, 1, 1, 1).float()
    bs_selected_boxes_co, bs_selected_scores, bs_overlaps = rpn_proposal_graph(
        pred_objectness, pred_anchor_deltas, valid_mask, corners_min_max_g)
    print(bs_overlaps)
/nets/egonet.py
import numpy as np
import hyperparams as hyp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.ops as ops
import utils.basic
import utils.improc
import utils.geom
import utils.misc
import utils.samp

EPS = 1e-6

# acknowledgement:
# Niles Christensen and Sohil Samir Savla developed the pytorch port of the
# original egonet.py, written in tensorflow


def eval_against_gt(loss, cam0_T_cam1_e, cam0_T_cam1_g, t_coeff=0.0, deg_coeff=0.0, sc=1.0, summ_writer=None):
    """Accumulate translation and yaw errors of an estimated RT against GT.

    Args:
        loss: running loss to add into.
        cam0_T_cam1_e: B x 4 x 4 estimated transform.
        cam0_T_cam1_g: B x 4 x 4 ground-truth transform.
        t_coeff, deg_coeff: loss weights for translation / yaw-degree terms.
        sc: scale tag used only in the summary names.

    Returns:
        Updated loss.
    """
    r_e, t_e = utils.geom.split_rt(cam0_T_cam1_e)
    r_g, t_g = utils.geom.split_rt(cam0_T_cam1_g)
    _, ry_e, _ = utils.geom.rotm2eul(r_e)
    _, ry_g, _ = utils.geom.rotm2eul(r_g)
    deg_e = torch.unsqueeze(utils.geom.rad2deg(ry_e), axis=-1)
    deg_g = torch.unsqueeze(utils.geom.rad2deg(ry_g), axis=-1)
    t_l2 = torch.mean(utils.basic.sql2_on_axis(t_e-t_g, 1))
    loss = utils.misc.add_loss('t_sql2_%.2f' % sc, loss, t_l2, t_coeff, summ_writer=summ_writer)
    deg_l2 = torch.mean(utils.basic.sql2_on_axis(deg_e-deg_g, 1))
    loss = utils.misc.add_loss('deg_sql2_%.2f' % sc, loss, deg_l2, deg_coeff, summ_writer=summ_writer)
    return loss


def cost_volume_3D(vox0, vox1, max_disp_z=4, max_disp_y=1, max_disp_x=4):
    """Dense 3D cost volume between two feature volumes.

    Args:
        vox0, vox1: B x C x Z x Y x X feature volumes.
        max_disp_*: max displacement searched on each axis.

    Returns:
        B x E x Z x Y x X cost volume, where E = prod(2*max_disp_i + 1);
        each spatial location holds a flattened displacement heatmap.
    """
    ones = torch.ones_like(vox1)
    # F.pad pads the LAST dim first, i.e. the tuple is (x, x, y, y, z, z).
    # bug fix: the original passed (z, z, y, y, x, x), which swapped the z and
    # x pad widths whenever max_disp_z != max_disp_x (the shipped defaults had
    # them equal, so the bug was latent).
    vox1_pad = F.pad(vox1, (max_disp_x, max_disp_x, max_disp_y, max_disp_y, max_disp_z, max_disp_z), 'constant', 0)
    ones_pad = F.pad(ones, (max_disp_x, max_disp_x, max_disp_y, max_disp_y, max_disp_z, max_disp_z), 'constant', 0)
    _, _, d, h, w = vox0.shape
    loop_range1 = max_disp_z * 2 + 1
    loop_range2 = max_disp_y * 2 + 1
    loop_range3 = max_disp_x * 2 + 1
    cost_vol = []
    for z in range(0, loop_range1):
        for y in range(0, loop_range2):
            for x in range(0, loop_range3):
                vox1_slice = vox1_pad[:, :, z:z+d, y:y+h, x:x+w]
                ones_slice = ones_pad[:, :, z:z+d, y:y+h, x:x+w]
                # masked mean over channels, so zero-padding does not bias the cost
                cost = utils.basic.reduce_masked_mean(vox0*vox1_slice, ones_slice, dim=1, keepdim=True)
                cost_vol.append(cost)
    cost_vol = torch.cat(cost_vol, axis=1)
    return cost_vol


class EgoNet(nn.Module):
    """Egomotion estimator: rotate-and-correlate feature volumes, then regress (rot, x, y, z)."""

    def __init__(self, num_scales=1, num_rots=3, max_deg=4, max_disp_z=1, max_disp_y=1, max_disp_x=1):
        print('EgoNet...')
        super(EgoNet, self).__init__()
        # bug fix: the original tested `if num_scales:`, which is truthy for
        # any nonzero count, so num_scales=2 wrongly selected [1].
        if num_scales==1:
            self.scales = [1]
        elif num_scales==2:
            self.scales = [0.5, 1]
        else:
            assert(False) # only 1-2 scales supported right now
        self.R = num_rots
        self.max_deg = max_deg  # max degrees rotation, on either side of zero
        self.max_disp_z = max_disp_z
        self.max_disp_y = max_disp_y
        self.max_disp_x = max_disp_x
        self.E1 = self.max_disp_z*2 + 1
        self.E2 = self.max_disp_y*2 + 1
        self.E3 = self.max_disp_x*2 + 1
        self.E = self.E1*self.E2*self.E3
        self.first_layer = nn.Linear(self.R*self.E, 128).cuda()
        self.second_layer = nn.Linear(128, 128).cuda()
        self.third_layer = nn.Linear(128, 4).cuda()

    def forward(self, feat0, feat1, cam0_T_cam1_g, vox_util, summ_writer, reuse=False):
        """Estimate cam0_T_cam1 from two feature volumes; returns (loss, RT, warped feat1)."""
        total_loss = 0.0
        utils.basic.assert_same_shape(feat0, feat1)
        summ_writer.summ_feats('ego/feats', [feat0, feat1], pca=True)
        total_loss, cam0_T_cam1_e, feat1_warped = self.multi_scale_corr3Dr(
            total_loss, feat0, feat1, vox_util, summ_writer, cam0_T_cam1_g, reuse=reuse)
        return total_loss, cam0_T_cam1_e, feat1_warped

    def multi_scale_corr3Dr(self, total_loss, feat0, feat1, vox_util, summ_writer,
                            cam0_T_cam1_g=None, reuse=False, do_print=False):
        """Coarse-to-fine registration.

        At each scale: rotate feat1 into R candidate orientations, build a 3D
        cost volume against feat0, and regress a (rot, x, y, z) update from
        the pooled heatmaps. The cumulative estimate warps the ORIGINAL feat1
        before the next scale, so each scale only needs a small correction.
        """
        alignments = []
        B, C, Z, Y, X = list(feat0.size())
        utils.basic.assert_same_shape(feat0, feat1)
        summ_writer.summ_feat('ego/feat0', feat0, pca=True)
        summ_writer.summ_feat('ego/feat1', feat1, pca=True)
        if (cam0_T_cam1_g is not None):
            # log the identity-baseline error for reference
            eye = utils.geom.eye_4x4(B)
            _ = eval_against_gt(0, eye, cam0_T_cam1_g, sc=0.0, summ_writer=summ_writer)
        feat1_backup = feat1.clone()
        rots = torch.linspace(-self.max_deg, self.max_deg, self.R)
        rots = torch.reshape(rots, [self.R])
        rot_cam_total = torch.zeros([B])
        delta_cam_total = torch.zeros([B, 3])
        for sc in self.scales:
            Z_ = int(Z*sc)
            Y_ = int(Y*sc)
            X_ = int(X*sc)
            if not sc==1.0:
                feat0_ = F.interpolate(feat0, scale_factor=sc, mode='trilinear')
                feat1_ = F.interpolate(feat1, scale_factor=sc, mode='trilinear')
            else:
                feat0_ = feat0.clone()
                feat1_ = feat1.clone()
            # have a heatmap at least sized 3, so that an argmax can return 0
            valid_Z = Z_-self.max_disp_z*2
            valid_Y = Y_-self.max_disp_y*2
            valid_X = X_-self.max_disp_x*2
            assert(valid_Z >= 3)
            assert(valid_Y >= 3)
            assert(valid_X >= 3)
            summ_writer.summ_feat('ego/feat0_resized_%.3f' % sc, feat0_, pca=True)
            summ_writer.summ_feat('ego/feat1_resized_%.3f' % sc, feat1_, pca=True)

            ## rotate feat1 into all candidate orientations
            r0 = torch.zeros([B*self.R])
            ry = torch.unsqueeze(rots, axis=0).repeat([B, 1]).reshape([B*self.R])
            r = utils.geom.eul2rotm(r0, utils.geom.deg2rad(ry), r0)
            t = torch.zeros([B*self.R, 3])
            # carries us from "1" coords to "N" (new) coords; B*R x 4 x 4
            camN_T_cam1 = utils.geom.merge_rt(r, t)
            # feats must lead with B*R to apply the batched warp
            feat0_ = torch.unsqueeze(feat0_, axis=1).repeat([1, self.R, 1, 1, 1, 1])
            feat1_ = torch.unsqueeze(feat1_, axis=1).repeat([1, self.R, 1, 1, 1, 1])
            feat0_ = feat0_.reshape([B*self.R, C, Z_, Y_, X_])
            feat1_ = feat1_.reshape([B*self.R, C, Z_, Y_, X_])
            featN_ = vox_util.apply_4x4_to_vox(camN_T_cam1, feat1_)
            featN__ = featN_.reshape([B, self.R, C, Z_, Y_, X_])
            summ_writer.summ_feats('ego/featN_%.3f_postwarp' % sc, torch.unbind(featN__, axis=1), pca=False)

            cc = cost_volume_3D(feat0_, featN_,
                                max_disp_z=self.max_disp_z,
                                max_disp_y=self.max_disp_y,
                                max_disp_x=self.max_disp_x)
            # cc is B*R x E x Z_ x Y_ x X_; pool the heatmaps over space
            heat = torch.sum(cc, axis=[2,3,4])
            heat = heat.reshape([B, self.R, 1, self.E1, self.E2, self.E3])
            summ_writer.summ_oned('ego/heat_%.3f' % sc, torch.mean(heat[0], axis=-2, keepdim=False))

            feat = heat.reshape([B, self.R*self.E])
            feat = F.leaky_relu(feat, negative_slope=0.1)
            # relja said normalizing helps:
            feat_norm = utils.basic.l2_on_axis(feat, 1, keepdim=True)
            feat = feat/(EPS+feat_norm)
            feat = self.first_layer(feat)
            feat = F.leaky_relu(feat, negative_slope=0.1)
            feat = self.second_layer(feat)
            feat = F.leaky_relu(feat, negative_slope=0.1)
            feat = self.third_layer(feat)
            r, y, x, z = torch.unbind(feat, axis=1)

            # convert the mem-coords argmax into a cam-coords translation
            xyz_argmax_mem = torch.unsqueeze(torch.stack([x, y, z], axis=1), axis=1)
            xyz_zero_mem = torch.zeros([B, 1, 3])
            # use Z*sc etc. instead of Z_, in case we cropped instead of scaled
            xyz_argmax_cam = vox_util.Mem2Ref(xyz_argmax_mem.cuda(), int(Z*sc), int(Y*sc), int(X*sc))
            xyz_zero_cam = vox_util.Mem2Ref(xyz_zero_mem.cuda(), int(Z*sc), int(Y*sc), int(X*sc))
            xyz_delta_cam = xyz_argmax_cam-xyz_zero_cam
            # mem is aligned with cam, and scaling does not affect rotation
            rot_cam = r.clone()
            summ_writer.summ_histogram('xyz_delta_cam', xyz_delta_cam)
            summ_writer.summ_histogram('rot_cam', rot_cam)

            delta_cam_total += xyz_delta_cam.reshape([B, 3]).cpu()
            rot_cam_total += rot_cam.cpu()
            r0 = torch.zeros([B])
            cam0_T_cam1_e = utils.geom.merge_rt(
                utils.geom.eul2rotm(r0, utils.geom.deg2rad(rot_cam_total), r0),
                -delta_cam_total)
            # re-warp the ORIGINAL feat1 with the cumulative RT; if the
            # estimate were perfect this would equal feat0
            feat1 = vox_util.apply_4x4_to_vox(cam0_T_cam1_e, feat1_backup)
            if (cam0_T_cam1_g is not None):
                total_loss = eval_against_gt(total_loss, cam0_T_cam1_e, cam0_T_cam1_g,
                                             t_coeff=hyp.ego_t_l2_coeff*sc,
                                             deg_coeff=hyp.ego_deg_l2_coeff*sc,
                                             sc=sc, summ_writer=summ_writer)
        return total_loss, cam0_T_cam1_e, feat1
/nets/emb2dnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
import archs.encoder2d as encoder2d
import hyperparams as hyp
import utils.basic
import utils.misc
import utils.improc

class Emb2dNet(nn.Module):
    """2D metric-learning head.

    Pulls matching pixel embeddings (emb_e vs emb_g) together with a
    margin loss, an InfoNCE-style CE loss against a pooled negative bank,
    and a masked L2 loss.
    """
    def __init__(self):
        super(Emb2dNet, self).__init__()
        print('Emb2dNet...')
        self.batch_k = 2  # 2 samples per class (the e/g pair)
        self.num_samples = hyp.emb2d_num_samples
        assert(self.num_samples > 0)
        self.sampler = utils.misc.DistanceWeightedSampling(batch_k=self.batch_k, normalize=False)
        self.criterion = utils.misc.MarginLoss() #margin=args.margin,nu=args.nu)
        self.beta = 1.2  # margin-loss beta
        self.dict_len = 20000  # capacity of the negative pool
        self.neg_pool = utils.misc.SimplePool(self.dict_len, version='pt')
        self.ce = torch.nn.CrossEntropyLoss()

    def sample_embs(self, emb0, emb1, valid, B, Y, X, mod='', do_vis=False, summ_writer=None):
        """Draw num_samples*B matched embedding pairs.

        emb0/emb1 are B x Y*X x C, valid is B x Y*X x 1. With
        hyp.emb2d_mindist == 0 sampling is uniform over all locations;
        otherwise utils.misc.get_safe_samples enforces a minimum spatial
        distance per batch element. Returns (emb0_s, emb1_s, valid_s).
        """
        if hyp.emb2d_mindist == 0.0:
            # pure random
            perm = torch.randperm(B*Y*X)
            emb0 = emb0.reshape(B*Y*X, -1)
            emb1 = emb1.reshape(B*Y*X, -1)
            valid = valid.reshape(B*Y*X, -1)
            emb0 = emb0[perm[:self.num_samples*B]]
            emb1 = emb1[perm[:self.num_samples*B]]
            valid = valid[perm[:self.num_samples*B]]
            return emb0, emb1, valid
        else:
            emb0_all = []
            emb1_all = []
            valid_all = []
            for b in list(range(B)):
                sample_indices, sample_locs, sample_valids = utils.misc.get_safe_samples(
                    valid[b], (Y, X), self.num_samples, mode='2d', tol=hyp.emb2d_mindist)
                emb0_s_ = emb0[b, sample_indices]
                emb1_s_ = emb1[b, sample_indices]
                # these are N x D
                emb0_all.append(emb0_s_)
                emb1_all.append(emb1_s_)
                valid_all.append(sample_valids)

            if do_vis and (summ_writer is not None):
                # visualize only the last batch element's samples
                sample_mask = utils.improc.xy2mask_single(sample_locs, Y, X)
                summ_writer.summ_oned('emb2d/samples_%s/sample_mask' % mod, torch.unsqueeze(sample_mask, dim=0))
                summ_writer.summ_oned('emb2d/samples_%s/valid' % mod, torch.reshape(valid, [B, 1, Y, X]))
            emb0_all = torch.cat(emb0_all, axis=0)
            emb1_all = torch.cat(emb1_all, axis=0)
            valid_all = torch.cat(valid_all, axis=0)
            return emb0_all, emb1_all, valid_all

    def compute_margin_loss(self, B, C, Y, X, emb0_vec, emb1_vec, valid_vec, mod='', do_vis=False, summ_writer=None):
        """Distance-weighted-sampling margin loss over sampled e/g pairs."""
        emb0_vec, emb1_vec, valid_vec = self.sample_embs(
            emb0_vec, emb1_vec, valid_vec,
            B, Y, X,
            mod=mod, do_vis=do_vis, summ_writer=summ_writer)
        emb_vec = torch.stack((emb0_vec, emb1_vec), dim=1).view(B*self.num_samples*self.batch_k,C)
        # this tensor goes e,g,e,g,... on dim 0
        # note this means 2 samples per class; batch_k=2
        y = torch.stack([torch.arange(0,self.num_samples*B), torch.arange(0,self.num_samples*B)], dim=1).view(self.num_samples*B*self.batch_k)
        # this tensor goes 0,0,1,1,2,2,...
        a_indices, anchors, positives, negatives, _ = self.sampler(emb_vec)
        margin_loss, _ = self.criterion(anchors, positives, negatives, self.beta, y[a_indices])
        return margin_loss

    def compute_ce_loss(self, B, C, Y, X, emb_e_vec_all, emb_g_vec_all, valid_vec_all, mod='', do_vis=False, summ_writer=None):
        """InfoNCE-style CE loss: each e matches its g against a pooled negative bank."""
        emb_e_vec, emb_g_vec, valid_vec = self.sample_embs(emb_e_vec_all, emb_g_vec_all, valid_vec_all, B, Y, X, mod=mod, do_vis=do_vis, summ_writer=summ_writer)
        # a second independent draw supplies fresh negatives for the pool
        _, emb_n_vec, _ = self.sample_embs(emb_e_vec_all, emb_g_vec_all, valid_vec_all, B, Y, X, mod=mod, do_vis=do_vis, summ_writer=summ_writer)
        emb_e_vec = emb_e_vec.view(B*self.num_samples, C)
        emb_g_vec = emb_g_vec.view(B*self.num_samples, C)
        emb_n_vec = emb_n_vec.view(B*self.num_samples, C)
        self.neg_pool.update(emb_n_vec.cpu())
        emb_n = self.neg_pool.fetch().cuda()
        N2, C2 = list(emb_n.shape)
        assert (C2 == C)
        emb_q = emb_e_vec.clone()
        emb_k = emb_g_vec.clone()
        N = emb_q.shape[0]
        # positive logits: per-row dot product q·k
        l_pos = torch.bmm(emb_q.view(N,1,-1), emb_k.view(N,-1,1))
        # negative logits: q against every pooled negative, N x N2
        l_neg = torch.mm(emb_q, emb_n.T)
        l_pos = l_pos.view(N, 1)
        # label 0 = the positive column, CE over [pos | negs]
        logits = torch.cat([l_pos, l_neg], dim=1)
        labels = torch.zeros(N, dtype=torch.long).cuda()
        temp = 0.07  # softmax temperature
        emb_loss = self.ce(logits/temp, labels)
        return emb_loss

    def forward(self, emb_e, emb_g, valid, summ_writer=None, suffix=''):
        """Compute the combined (margin + CE + L2) embedding loss.

        Args:
            emb_e, emb_g: B x C x H x W estimated / target embeddings.
            valid: B x 1 x H x W validity mask.

        Returns:
            (total_loss, emb_g).
        """
        total_loss = torch.tensor(0.0).cuda()

        if torch.isnan(emb_e).any() or torch.isnan(emb_g).any():
            assert(False)

        B, C, H, W = list(emb_e.shape)
        # put channels on the end
        emb_e_vec = emb_e.permute(0,2,3,1).reshape(B, H*W, C)
        emb_g_vec = emb_g.permute(0,2,3,1).reshape(B, H*W, C)
        valid_vec = valid.permute(0,2,3,1).reshape(B, H*W, 1)

        assert(self.num_samples < (B*H*W))
        # we will take num_samples from each one

        margin_loss = self.compute_margin_loss(B, C, H, W, emb_e_vec, emb_g_vec, valid_vec, 'all', True, summ_writer)
        total_loss = utils.misc.add_loss('emb2d/emb2d_ml_loss%s' % suffix, total_loss, margin_loss, hyp.emb2d_ml_coeff, summ_writer)

        # g is detached: it is the reference; only e is pulled toward it
        ce_loss = self.compute_ce_loss(B, C, H, W, emb_e_vec, emb_g_vec.detach(), valid_vec, 'g', False, summ_writer)
        total_loss = utils.misc.add_loss('emb2d/emb_ce_loss', total_loss, ce_loss, hyp.emb2d_ce_coeff, summ_writer)

        l2_loss_im = utils.basic.sql2_on_axis(emb_e-emb_g.detach(), 1, keepdim=True)
        emb_l2_loss = utils.basic.reduce_masked_mean(l2_loss_im, valid)
        total_loss = utils.misc.add_loss('emb2d/emb2d_l2_loss%s' % suffix, total_loss, emb_l2_loss, hyp.emb2d_l2_coeff, summ_writer)

        if summ_writer is not None:
            summ_writer.summ_oned('emb2d/emb2d_l2_loss%s' % suffix, l2_loss_im)
            summ_writer.summ_feats('emb2d/embs_2d%s' % suffix, [emb_e, emb_g], pca=True)
        return total_loss, emb_g
/nets/emb3dnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
import hyperparams as hyp
import utils.improc
import utils.misc
import utils.vox
import utils.basic

class Emb3dNet(nn.Module):
    """3D metric-learning head (voxel analogue of Emb2dNet).

    Pulls matching voxel embeddings (emb_e vs emb_g) together with an
    InfoNCE-style CE loss against a pooled negative bank, plus a masked
    L2 loss.
    """
    def __init__(self):
        super(Emb3dNet, self).__init__()
        print('Emb3dNet...')
        self.batch_k = 2  # 2 samples per class (the e/g pair)
        self.num_samples = hyp.emb3d_num_samples
        assert(self.num_samples > 0)
        self.sampler = utils.misc.DistanceWeightedSampling(batch_k=self.batch_k, normalize=False)
        self.criterion = utils.misc.MarginLoss() #margin=args.margin,nu=args.nu)
        self.beta = 1.2  # margin-loss beta
        self.dict_len = 20000  # capacity of the negative pool
        self.neg_pool = utils.misc.SimplePool(self.dict_len, version='pt')
        self.ce = torch.nn.CrossEntropyLoss()

    def sample_embs(self, emb0, emb1, valid, B, Z, Y, X, mod='', do_vis=False, summ_writer=None):
        """Draw num_samples*B matched voxel-embedding pairs.

        emb0/emb1 are B x Z*Y*X x C, valid is B x Z*Y*X x 1. With
        hyp.emb3d_mindist == 0 sampling is uniform; otherwise
        utils.misc.get_safe_samples enforces a minimum spatial distance
        per batch element. Returns (emb0_s, emb1_s, valid_s).
        """
        if hyp.emb3d_mindist == 0.0:
            # pure random
            perm = torch.randperm(B*Z*Y*X)
            emb0 = emb0.reshape(B*Z*Y*X, -1)
            emb1 = emb1.reshape(B*Z*Y*X, -1)
            valid = valid.reshape(B*Z*Y*X, -1)
            emb0 = emb0[perm[:self.num_samples*B]]
            emb1 = emb1[perm[:self.num_samples*B]]
            valid = valid[perm[:self.num_samples*B]]
            return emb0, emb1, valid
        else:
            emb0_all = []
            emb1_all = []
            valid_all = []
            for b in list(range(B)):
                sample_indices, sample_locs, sample_valids = utils.misc.get_safe_samples(
                    valid[b], (Z, Y, X), self.num_samples, mode='3d', tol=hyp.emb3d_mindist)
                emb0_s_ = emb0[b, sample_indices]
                emb1_s_ = emb1[b, sample_indices]
                # these are N x D
                emb0_all.append(emb0_s_)
                emb1_all.append(emb1_s_)
                valid_all.append(sample_valids)

            if do_vis and (summ_writer is not None):
                # visualize only the last batch element's samples
                sample_occ = utils.vox.voxelize_xyz(torch.unsqueeze(sample_locs, dim=0), Z, Y, X, already_mem=True)
                summ_writer.summ_occ('emb3d/samples_%s/sample_occ' % mod, sample_occ, reduce_axes=[2,3])
                summ_writer.summ_occ('emb3d/samples_%s/valid' % mod, torch.reshape(valid, [B, 1, Z, Y, X]), reduce_axes=[2,3])
            emb0_all = torch.cat(emb0_all, axis=0)
            emb1_all = torch.cat(emb1_all, axis=0)
            valid_all = torch.cat(valid_all, axis=0)
            return emb0_all, emb1_all, valid_all

    def compute_ce_loss(self, B, C, Z, Y, X, emb_e_vec_all, emb_g_vec_all, valid_vec_all, mod='', do_vis=False, summ_writer=None):
        """InfoNCE-style CE loss: each e matches its g against a pooled negative bank."""
        emb_e_vec, emb_g_vec, valid_vec = self.sample_embs(emb_e_vec_all, emb_g_vec_all, valid_vec_all, B, Z, Y, X, mod=mod, do_vis=do_vis, summ_writer=summ_writer)
        # a second independent draw supplies fresh negatives for the pool
        _, emb_n_vec, _ = self.sample_embs(emb_e_vec_all, emb_g_vec_all, valid_vec_all, B, Z, Y, X, mod=mod, do_vis=do_vis, summ_writer=summ_writer)
        emb_e_vec = emb_e_vec.view(B*self.num_samples, C)
        emb_g_vec = emb_g_vec.view(B*self.num_samples, C)
        emb_n_vec = emb_n_vec.view(B*self.num_samples, C)
        self.neg_pool.update(emb_n_vec.cpu())
        emb_n = self.neg_pool.fetch().cuda()
        N2, C2 = list(emb_n.shape)
        assert (C2 == C)
        emb_q = emb_e_vec.clone()
        emb_k = emb_g_vec.clone()
        N = emb_q.shape[0]
        # positive logits: per-row dot product q·k
        l_pos = torch.bmm(emb_q.view(N,1,-1), emb_k.view(N,-1,1))
        # negative logits: q against every pooled negative, N x N2
        l_neg = torch.mm(emb_q, emb_n.T)
        l_pos = l_pos.view(N, 1)
        # label 0 = the positive column, CE over [pos | negs]
        logits = torch.cat([l_pos, l_neg], dim=1)
        labels = torch.zeros(N, dtype=torch.long).cuda()
        temp = 0.07  # softmax temperature
        emb_loss = self.ce(logits/temp, labels)
        return emb_loss

    def forward(self, emb_e, emb_g, vis_e, vis_g, summ_writer=None):
        """Compute the combined (CE + masked L2) 3D embedding loss.

        Args:
            emb_e, emb_g: B x C x D x H x W estimated / target embeddings.
            vis_e, vis_g: B x 1 x D x H x W visibility masks.

        Returns:
            total_loss.
        """
        total_loss = torch.tensor(0.0).cuda()

        if torch.isnan(emb_e).any() or torch.isnan(emb_g).any():
            assert(False)

        B, C, D, H, W = list(emb_e.shape)
        # put channels on the end
        emb_e_vec = emb_e.permute(0,2,3,4,1).reshape(B, D*H*W, C)
        emb_g_vec = emb_g.permute(0,2,3,4,1).reshape(B, D*H*W, C)
        vis_e_vec = vis_e.permute(0,2,3,4,1).reshape(B, D*H*W, 1)
        vis_g_vec = vis_g.permute(0,2,3,4,1).reshape(B, D*H*W, 1)

        # ensure they are both nonzero, else we probably masked or warped something
        valid_vec_e = 1.0 - (emb_e_vec==0).all(dim=2, keepdim=True).float()
        valid_vec_g = 1.0 - (emb_g_vec==0).all(dim=2, keepdim=True).float()
        valid_vec = valid_vec_e * valid_vec_g
        vis_e_vec *= valid_vec
        vis_g_vec *= valid_vec

        assert(self.num_samples < (B*D*H*W))
        # we will take num_samples from each one

        # g is detached: it is the reference; only e is pulled toward it
        ce_loss = self.compute_ce_loss(B, C, D, H, W, emb_e_vec, emb_g_vec.detach(), vis_g_vec, 'g', False, summ_writer)
        total_loss = utils.misc.add_loss('emb3d/emb_ce_loss', total_loss, ce_loss, hyp.emb3d_ce_coeff, summ_writer)

        # where g is valid, we use it as reference and pull up e
        l2_loss = utils.basic.reduce_masked_mean(utils.basic.sql2_on_axis(emb_e-emb_g.detach(), 1, keepdim=True), vis_g)
        total_loss = utils.misc.add_loss('emb3d/emb3d_l2_loss', total_loss, l2_loss, hyp.emb3d_l2_coeff, summ_writer)

        # mean over the Y axis, for a bird's-eye-style visualization
        l2_loss_im = torch.mean(utils.basic.sql2_on_axis(emb_e-emb_g, 1, keepdim=True), dim=3)
        if summ_writer is not None:
            summ_writer.summ_oned('emb3d/emb3d_l2_loss', l2_loss_im)
            summ_writer.summ_feats('emb3d/embs_3d', [emb_e, emb_g], pca=True)
        return total_loss
/nets/flownet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# NOTE(review): this import was commented out upstream even though
# SpatialCorrelationSampler is used unconditionally in __init__ below, so the
# module could never be constructed. Restored.
from spatial_correlation_sampler import SpatialCorrelationSampler
import numpy as np

import archs.encoder3D
import hyperparams as hyp
import utils_basic
import utils_improc
import utils_misc
import utils_samp
import math


class FlowNet(nn.Module):
    """Multi-scale 3D scene-flow estimator.

    Correlates two 3D feature volumes at a pyramid of scales, regresses a
    3-channel residual flow at each scale, and composes the residuals
    Lucas-Kanade style by re-warping the second volume with the running flow.
    """

    def __init__(self):
        super(FlowNet, self).__init__()
        print('FlowNet...')
        self.debug = False
        # Side length (voxels) of the cubic correlation search window.
        self.heatmap_size = hyp.flow_heatmap_size
        # Coarse-to-fine resize factors applied to the input volumes.
        self.scales = [0.25, 0.5, 0.75, 1.0]
        self.num_scales = len(self.scales)
        self.correlation_sampler = SpatialCorrelationSampler(
            kernel_size=1,
            patch_size=self.heatmap_size,
            stride=1,
            padding=0,
            dilation_patch=1,
        ).cuda()
        # Maps the (heatmap_size**3)-channel correlation volume to 3-channel flow.
        self.flow_predictor = nn.Sequential(
            nn.Conv3d(in_channels=(self.heatmap_size**3), out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(negative_slope=0.1),
            nn.Conv3d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(negative_slope=0.1),
            nn.Conv3d(in_channels=64, out_channels=3, kernel_size=1, stride=1, padding=0),
        ).cuda()
        self.smoothl1 = torch.nn.SmoothL1Loss(reduction='none')
        self.smoothl1_mean = torch.nn.SmoothL1Loss(reduction='mean')
        self.mse = torch.nn.MSELoss(reduction='none')
        self.mse_mean = torch.nn.MSELoss(reduction='mean')
        print(self.flow_predictor)

    def generate_flow(self, feat0, feat1, sc):
        """Predict flow from feat0 to feat1 at resize factor `sc`.

        Both inputs are (B, C, D, H, W) volumes of identical shape. The
        returned flow is always at the *full* input resolution, with its
        magnitude rescaled by 1/sc to compensate for the downsampling.
        """
        B, C, D, H, W = list(feat0.shape)
        utils_basic.assert_same_shape(feat0, feat1)
        if self.debug:
            print('scale = %.2f' % sc)
            print('inputs:')
            print(feat0.shape)
            print(feat1.shape)
        if not sc == 1.0:
            feat0 = F.interpolate(feat0, scale_factor=sc, mode='trilinear', align_corners=False)
            feat1 = F.interpolate(feat1, scale_factor=sc, mode='trilinear', align_corners=False)
            D, H, W = int(D*sc), int(H*sc), int(W*sc)
            if self.debug:
                print('downsamps:')
                print(feat0.shape)
                print(feat1.shape)
        # The correlation sampler requires contiguous inputs.
        feat0 = feat0.contiguous()
        feat1 = feat1.contiguous()
        cc = self.correlation_sampler(feat0, feat1)
        if self.debug:
            print('cc:')
            print(cc.shape)
        cc = cc.view(B, self.heatmap_size**3, D, H, W)
        cc = F.relu(cc)  # relu works better than leaky relu here
        if self.debug:
            print(cc.shape)
        cc = utils_basic.l2_normalize(cc, dim=1)
        flow = self.flow_predictor(cc)
        if self.debug:
            print('flow:')
            print(flow.shape)
        if not sc == 1.0:
            # 1px at this scale means 1px/sc at the real scale:
            # first put the pixels in the right places, then correct magnitude.
            flow = F.interpolate(flow, scale_factor=(1./sc), mode='trilinear', align_corners=False)
            flow = flow/sc
            if self.debug:
                print('flow up:')
                print(flow.shape)
        return flow

    def forward(self, feat0, feat1, flow_g, mask_g, summ_writer=None):
        """Estimate flow feat0->feat1 and compute supervised losses vs flow_g.

        Returns (total_loss, flow_total) where flow_total is the composed
        multi-scale flow at full resolution.
        """
        total_loss = torch.tensor(0.0).cuda()
        B, C, D, H, W = list(feat0.shape)
        utils_basic.assert_same_shape(feat0, feat1)

        flow_total = torch.zeros_like(flow_g)
        feat1_aligned = feat1.clone()

        # Log the pre-alignment feature discrepancy (coeff 0: metric only).
        feat_diff = torch.mean(utils_basic.l2_on_axis((feat1_aligned-feat0), 1, keepdim=True))
        utils_misc.add_loss('flow/feat_align_diff_%.2f' % 0.0, 0, feat_diff, 0, summ_writer)

        for sc in self.scales:
            flow = self.generate_flow(feat0, feat1_aligned, sc)
            flow_total = flow_total + flow
            # Compositional LK: always warp the *original* feat1 with the
            # cumulative flow rather than re-warping the warped volume.
            feat1_aligned = utils_samp.backwarp_using_3D_flow(feat1, flow_total)
            valid1_region = utils_samp.backwarp_using_3D_flow(torch.ones_like(feat1[:, 0:1]), flow_total)
            feat_diff = utils_basic.reduce_masked_mean(
                utils_basic.l2_on_axis((feat1_aligned-feat0), 1, keepdim=True), valid1_region)
            utils_misc.add_loss('flow/feat_align_diff_%.2f' % sc, 0, feat_diff, 0, summ_writer)

        # Inference done; losses/metrics below.
        l1_diff_3chan = self.smoothl1(flow_total, flow_g)
        l1_diff = torch.mean(l1_diff_3chan, dim=1, keepdim=True)
        l2_diff_3chan = self.mse(flow_total, flow_g)
        l2_diff = torch.mean(l2_diff_3chan, dim=1, keepdim=True)

        # Split supervision into moving (nonzero GT flow) vs static voxels so
        # the (dominant) static voxels don't drown out the moving ones.
        nonzero_mask = ((torch.sum(torch.abs(flow_g), dim=1, keepdim=True) > 0.01).float())*mask_g
        yeszero_mask = (1.0-nonzero_mask)*mask_g
        l1_loss = utils_basic.reduce_masked_mean(l1_diff, mask_g)
        l2_loss = utils_basic.reduce_masked_mean(l2_diff, mask_g)
        l1_loss_nonzero = utils_basic.reduce_masked_mean(l1_diff, nonzero_mask)
        l1_loss_yeszero = utils_basic.reduce_masked_mean(l1_diff, yeszero_mask)
        l1_loss_balanced = (l1_loss_nonzero + l1_loss_yeszero)*0.5
        l2_loss_nonzero = utils_basic.reduce_masked_mean(l2_diff, nonzero_mask)
        l2_loss_yeszero = utils_basic.reduce_masked_mean(l2_diff, yeszero_mask)
        # NOTE(review): computed but never logged/used, kept for parity with l1.
        l2_loss_balanced = (l2_loss_nonzero + l2_loss_yeszero)*0.5

        # Visualization clip level derived from the GT flow magnitude.
        clip = np.squeeze(torch.max(torch.abs(torch.mean(flow_g[0], dim=0))).detach().cpu().numpy()).item()
        if summ_writer is not None:
            # `sc` here is the last (finest) scale from the loop above.
            summ_writer.summ_3D_flow('flow/flow_e_%.2f' % sc, flow_total*mask_g, clip=clip)
            summ_writer.summ_3D_flow('flow/flow_g_%.2f' % sc, flow_g, clip=clip)

        utils_misc.add_loss('flow/l1_loss_nonzero', 0, l1_loss_nonzero, 0, summ_writer)
        utils_misc.add_loss('flow/l1_loss_yeszero', 0, l1_loss_yeszero, 0, summ_writer)
        utils_misc.add_loss('flow/l1_loss_balanced', 0, l1_loss_balanced, 0, summ_writer)
        total_loss = utils_misc.add_loss('flow/l1_loss', total_loss, l1_loss, hyp.flow_l1_coeff, summ_writer)
        total_loss = utils_misc.add_loss('flow/l2_loss', total_loss, l2_loss, hyp.flow_l2_coeff, summ_writer)
        total_loss = utils_misc.add_loss('flow/warp', total_loss, feat_diff, hyp.flow_warp_coeff, summ_writer)

        # Smoothness loss on the spatial gradients of the predicted flow.
        # BUG FIX: was `dx+dy+dx`, which double-counted dx and never
        # penalized dz; also `keepdims` -> torch's `keepdim`.
        dx, dy, dz = utils_basic.gradient3D(flow_total, absolute=True)
        smooth_vox = torch.mean(dx+dy+dz, dim=1, keepdim=True)
        if summ_writer is not None:
            summ_writer.summ_oned('flow/smooth_loss', torch.mean(smooth_vox, dim=3))
        smooth_loss = torch.mean(smooth_vox)
        total_loss = utils_misc.add_loss('flow/smooth_loss', total_loss, smooth_loss, hyp.flow_smooth_coeff, summ_writer)

        return total_loss, flow_total
/nets/viewnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
import archs.renderer
import hyperparams as hyp
from utils.basic import *
import utils.improc
import utils.basic
import utils.misc
import utils.geom

class ViewNet(nn.Module):
    """Neural renderer head: projects a 3D feature volume into a target view
    and decodes it into an RGB image plus a 2D embedding map.

    Trained with an L1 loss between the rendered RGB and the ground-truth
    image `rgb_g` (optionally masked by `valid`).
    """
    def __init__(self):
        super(ViewNet, self).__init__()
        print('ViewNet...')
        # 3D->2D renderer backbone; pools hyp.view_depth depth samples down by 8.
        self.net = archs.renderer.Net3d2d(hyp.feat3d_dim, 64, 32, hyp.view_depth, depth_pool=8).cuda()
        # Decodes the 32-channel rendered feature into a 3-channel RGB image.
        self.rgb_layer = nn.Sequential(
            nn.LeakyReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0),
        ).cuda()
        # Decodes the same feature into a feat2d_dim-channel embedding map.
        self.emb_layer = nn.Sequential(
            nn.LeakyReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(32, hyp.feat2d_dim, kernel_size=1, stride=1, padding=0),
        ).cuda()
        print(self.net)

    def forward(self, pix_T_cam0, cam0_T_cam1, feat_mem1, rgb_g, vox_util, valid=None, summ_writer=None, test=False, suffix=''):
        """Render feat_mem1 into camera 0's view and score it against rgb_g.

        Returns (total_loss, rgb, emb); in `test` mode returns (None, rgb, None)
        and skips the loss entirely.
        """
        total_loss = torch.tensor(0.0).cuda()
        B, C, H, W = list(rgb_g.shape)
        PH, PW = hyp.PH, hyp.PW
        if (PH < H) or (PW < W):
            # Rendering happens at the (smaller) PH x PW projection resolution,
            # so shrink the intrinsics and the supervision targets to match.
            sy = float(PH)/float(H)
            sx = float(PW)/float(W)
            pix_T_cam0 = utils.geom.scale_intrinsics(pix_T_cam0, sx, sy)
            # NOTE(review): the resize below hard-codes scale_factor=0.5 rather
            # than using (sx, sy) — this assumes PH==H/2 and PW==W/2; confirm.
            if valid is not None:
                valid = F.interpolate(valid, scale_factor=0.5, mode='nearest')
            rgb_g = F.interpolate(rgb_g, scale_factor=0.5, mode='bilinear')
        # Project the memory-frame features into camera 0's image plane.
        feat_proj = vox_util.apply_pixX_T_memR_to_voxR(
            pix_T_cam0, cam0_T_cam1, feat_mem1,
            hyp.view_depth, PH, PW)
        feat = self.net(feat_proj)
        rgb = self.rgb_layer(feat)
        emb = self.emb_layer(feat)
        # Unit-normalize the embedding channels for metric-learning use.
        emb = utils.basic.l2_normalize(emb, dim=1)
        if test:
            # Inference-only path: no loss, no embedding consumer.
            return None, rgb, emb
        # Per-pixel L1 between render and target; mask by `valid` if given.
        loss_im = utils.basic.l1_on_axis(rgb-rgb_g, 1, keepdim=True)
        if valid is not None:
            rgb_loss = utils.basic.reduce_masked_mean(loss_im, valid)
        else:
            rgb_loss = torch.mean(loss_im)
        total_loss = utils.misc.add_loss('view/rgb_l1_loss', total_loss, rgb_loss, hyp.view_l1_coeff, summ_writer)
        # vis
        if summ_writer is not None:
            summ_writer.summ_oned('view/rgb_loss', loss_im)
            summ_writer.summ_rgbs('view/rgb', [rgb.clamp(-0.5, 0.5), rgb_g])
            summ_writer.summ_rgb('view/rgb_e', rgb.clamp(-0.5, 0.5))
            summ_writer.summ_rgb('view/rgb_g', rgb_g.clamp(-0.5, 0.5))
            summ_writer.summ_feat('view/emb', emb, pca=True)
            if valid is not None:
                summ_writer.summ_rgb('view/rgb_e_valid', valid*rgb.clamp(-0.5, 0.5))
                summ_writer.summ_rgb('view/rgb_g_valid', valid*rgb_g.clamp(-0.5, 0.5))
        return total_loss, rgb, emb
/pretrained_nets_carla.py
"""Pretrained checkpoint names for the CARLA nets.

`ckpt` selects the run whose weights initialize the 3D feature net and the
occupancy net.
"""

# NOTE(review): the first `ckpt` assignment was a dead store (immediately
# overwritten by the line below); kept as a comment for provenance.
# ckpt = '02_s2_m128x32x128_p64x192_1e-3_F2_d32_F3_d32_s.01_O_c1_s.01_V_d32_e1_E2_e.1_n4_d32_c1_E3_n2_c1_mags7i3t_sta41'
ckpt = '02_s2_m128x32x128_1e-3_F3_d32_s.01_O_c2_s.1_E3_n2_c.1_mags7i3t_sta48'

# Both nets initialize from the same run; feat3d_dim must match the F3_d32
# tag encoded in the checkpoint name.
feat3d_init = ckpt
feat3d_dim = 32
occ_init = ckpt
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
SarthakJShetty/pyResearchInsights
refs/heads/master
{"/pyResearchInsights/__init__.py": ["/pyResearchInsights/Analyzer.py", "/pyResearchInsights/Cleaner.py", "/pyResearchInsights/Scraper.py", "/pyResearchInsights/common_functions.py", "/pyResearchInsights/NLP_Engine.py", "/pyResearchInsights/system_functions.py"], "/pyResearchInsights/example.py": ["/pyResearchInsights/Analyzer.py", "/pyResearchInsights/Cleaner.py", "/pyResearchInsights/Scraper.py", "/pyResearchInsights/common_functions.py", "/pyResearchInsights/NLP_Engine.py", "/pyResearchInsights/system_functions.py"], "/pyResearchInsights/Scraper.py": ["/pyResearchInsights/common_functions.py"], "/pyResearchInsights/Visualizer.py": ["/pyResearchInsights/common_functions.py"], "/pyResearchInsights/Analyzer.py": ["/pyResearchInsights/common_functions.py"], "/pyResearchInsights/Cleaner.py": ["/pyResearchInsights/common_functions.py"], "/pyResearchInsights/NLP_Engine.py": ["/pyResearchInsights/common_functions.py", "/pyResearchInsights/Visualizer.py"], "/pyResearchInsights/system_functions.py": ["/pyResearchInsights/common_functions.py"]}
└── └── pyResearchInsights ├── Analyzer.py ├── Cleaner.py ├── NLP_Engine.py ├── Scraper.py ├── Visualizer.py ├── __init__.py ├── common_functions.py ├── example.py └── system_functions.py
/pyResearchInsights/Analyzer.py
'''This code is part of the larger pyResearchInsights project, where we aim to study the research
themes being discussed in scientific publications. This portion of the code analyzes the contents of
the .txt file developed by the Scraper.py and saves it to a .csv for later visualization by the soon
to be built Visualizer.py script
Sarthak J. Shetty
01/09/2018'''

'''Importing OS here to split the filename at the extension'''
import os
'''Importing status_logger here to log the details of the process run.'''
from pyResearchInsights.common_functions import status_logger
'''Importing the collections which contains the Counter function'''
from collections import Counter
'''Importing pandas here to build the dataframe'''
import pandas as pd
'''Importing numpy here to build the index of the pandas frameword'''
import numpy as np


def analyzer_pre_processing(abstracts_log_name, status_logger_name):
    '''Carries out the pre-processing tasks, such as folder creation.

    Prefers the *_CLEANED.txt produced by Cleaner.py; falls back to the raw
    .txt so the Analyzer can also run standalone. Returns the input .txt path
    and the output .csv path.
    '''
    analyzer_pre_processing_status_key = "Carrying out pre-processing functions for analyzer"
    status_logger(status_logger_name, analyzer_pre_processing_status_key)
    # FIX: the original probed for the cleaned file by calling open() and
    # never closing the handle; an existence check avoids the leak.
    cleaned_candidate = (abstracts_log_name.split(".txt")[0]) + "_" + 'CLEANED.txt'
    if os.path.isfile(cleaned_candidate):
        abstracts_txt_file_name = cleaned_candidate
    else:
        abstracts_txt_file_name = (abstracts_log_name.split(".txt")[0]) + '.txt'
    '''This code strips the abstracts_log_name of its extension and adds a .csv to it'''
    abstracts_csv_file_name = (abstracts_log_name.split(".txt")[0]) + "_" + "FREQUENCY_CSV_DATA" + ".csv"
    analyzer_pre_processing_status_key = "Carried out pre-processing functions for analyzer"
    status_logger(status_logger_name, analyzer_pre_processing_status_key)
    return abstracts_txt_file_name, abstracts_csv_file_name


'''Stop-list of boilerplate/function words dropped before counting frequencies.
A frozenset makes each membership test O(1) (was a linear scan of a list).'''
_WORDS_TO_BE_ELIMINATED = frozenset([
    'from', 'subject', 're', 'edu', 'use', 'com', 'https', 'url', 'link', 'abstract', 'author',
    'chapter', 'springer', 'title', "the", "of", "and", "in", "to", "a", "is", "for", "with",
    "that", "by", "are", "on", "was", "as", "were", "url:", "abstract:", "author:", "title:",
    "at", "be", "an", "have", "this", "which", "study", "been", "not", "has", "its", "also",
    "these", "can", 'it', 'their', "e.g.", "those", "had", "but", "while", "will", "when",
    "only", "there", "our", "did", "if", "they", "such", "than", "no", "-", "could"])


def list_cleaner(list_to_be_cleaned, status_logger_name):
    '''Removes stop-list words from the list of abstract words and returns the rest.'''
    list_cleaner_start_status_key = "Cleaning the list of words generated"
    status_logger(status_logger_name, list_cleaner_start_status_key)
    cleaned_list_of_words_in_abstract = [item for item in list_to_be_cleaned
                                         if item not in _WORDS_TO_BE_ELIMINATED]
    list_cleaner_end_status_key = "Cleaned the list of words generated"
    status_logger(status_logger_name, list_cleaner_end_status_key)
    return cleaned_list_of_words_in_abstract


def transfer_function(abstracts_txt_file_name, abstracts_csv_file_name, status_logger_name):
    '''Counts word frequencies in the abstracts .txt and writes them to the .csv.'''
    transfer_function_status_key = "Copying data from"+" "+str(abstracts_txt_file_name)+" "+"to"+" "+"pandas dataframe"
    status_logger(status_logger_name, transfer_function_status_key)
    '''Each word is appended to the list, from the .txt file'''
    list_of_words_in_abstract = []
    with open(abstracts_txt_file_name, 'r') as abstracts_txt_data:
        for line in abstracts_txt_data:
            list_of_words_in_abstract.extend(line.split())
    '''This function cleans up the data of uneccessary words'''
    cleaned_list_of_words_in_abstract = list_cleaner(list_of_words_in_abstract, status_logger_name)
    '''A Counter is a dictionary, where the value is the frequency of term, which is the key'''
    dictionary_of_abstract_list = Counter(cleaned_list_of_words_in_abstract)
    # FIX: build the dataframe directly from the Counter instead of a
    # row-by-row .loc fill with an always-false length guard.
    dataframe_of_abstract_words = pd.DataFrame(
        list(dictionary_of_abstract_list.items()), columns=['Words', 'Frequency'])
    transfer_function_status_key = "Copied data from"+" "+str(abstracts_txt_file_name)+" "+"to"+" "+"pandas dataframe"
    status_logger(status_logger_name, transfer_function_status_key)
    transfer_function_status_key = "Copying data from pandas dataframe to"+" "+str(abstracts_csv_file_name)
    status_logger(status_logger_name, transfer_function_status_key)
    '''Saving dataframe to csv file, without the index column'''
    dataframe_of_abstract_words.to_csv(abstracts_csv_file_name, index=False)
    transfer_function_status_key = "Copied data from pandas dataframe to"+" "+str(abstracts_csv_file_name)
    status_logger(status_logger_name, transfer_function_status_key)


def analyzer_main(abstracts_log_name, status_logger_name):
    '''Entry point: resolves file names, then counts and dumps word frequencies.'''
    analyzer_main_status_key = "Entered the Analyzer.py code."
    status_logger(status_logger_name, analyzer_main_status_key)
    '''Calling the pre-processing and transfer functions here'''
    abstracts_txt_file_name, abstracts_csv_file_name = analyzer_pre_processing(abstracts_log_name, status_logger_name)
    transfer_function(abstracts_txt_file_name, abstracts_csv_file_name, status_logger_name)
    '''Logs the end of the process Analyzer code in the status_logger'''
    analyzer_main_status_key = "Exiting the Analyzer.py code."
    status_logger(status_logger_name, analyzer_main_status_key)
/pyResearchInsights/Cleaner.py
''' Hello! This script is part of the larger pyResearchInsights project that you can check out here:
https://github.com/SarthakJShetty/pyResearchInsights
We are trying to build an end-to-end ACA tool here
-Sarthak
(03/10/2019)
Purpose of this script: Clean the corpus of special character'''

'''Importing the status logger function here to LOG the cleaner module working for debugging'''
from pyResearchInsights.common_functions import status_logger

# FIX: the original kept `elements`, `dirty_elements`, `cleaned_str_list` and
# `cleaned_texts` as module-level lists, so state accumulated across repeated
# cleaner_main() calls in one interpreter session. All of that state is now
# local to the functions that produce it.


def txt_to_list(abstract_directory, status_logger_name):
    '''Reads the abstracts file into a list of lines.

    Prefers the *_ANALYTICAL.txt produced earlier in the pipeline; falls back
    to the raw .txt so the Cleaner can also run standalone.
    '''
    txt_to_list_start_status_key = "Converting text to list"
    status_logger(status_logger_name, txt_to_list_start_status_key)
    try:
        cleaner_abstract_directory = (abstract_directory.split(".txt")[0])+"_"+'ANALYTICAL.txt'
        folder = open(cleaner_abstract_directory, 'r')
    except FileNotFoundError:
        cleaner_abstract_directory = (abstract_directory.split(".txt")[0])+'.txt'
        folder = open(cleaner_abstract_directory, 'r')
    # FIX: close the handle when done (the original leaked it).
    with folder:
        abstracts = [line for line in folder]
    txt_to_list_end_status_key = "Converted text to list"
    status_logger(status_logger_name, txt_to_list_end_status_key)
    return abstracts


def dirty_element_generator(texts, status_logger_name):
    '''Collects every whitespace-separated token containing a backslash.'''
    dirty_element_generator_start_status_key = "Generating list with special elements for weeding out later"
    status_logger(status_logger_name, dirty_element_generator_start_status_key)
    dirty_elements = []
    for text in texts:
        for element in text.split(" "):
            if '\\' in element:
                dirty_elements.append(element)
    dirty_element_generator_end_status_key = "Generated list with special elements for weeding out later"
    status_logger(status_logger_name, dirty_element_generator_end_status_key)
    return dirty_elements


def dirty_element_weeder(texts, dirty_elements, status_logger_name):
    '''Rebuilds each abstract with the dirty tokens removed.'''
    dirty_element_weeder_start_status_key = "Removing elements with special characters from the text list"
    status_logger(status_logger_name, dirty_element_weeder_start_status_key)
    # O(1) membership tests instead of scanning the list per token.
    dirty_set = set(dirty_elements)
    cleaned_texts = []
    for text in texts:
        kept_elements = [element for element in text.split(" ") if element not in dirty_set]
        cleaned_texts.append(" ".join(kept_elements))
    dirty_element_weeder_end_status_key = "Removed elements with special characters from the text list"
    status_logger(status_logger_name, dirty_element_weeder_end_status_key)
    return cleaned_texts


def cleaned_abstract_dumper(abstract_directory, cleaned_texts, status_logger_name):
    '''Writes the cleaned abstracts to <name>_CLEANED.txt, one per line.

    Returns the (still open) file handle, matching the original interface;
    the buffer is flushed so downstream readers see the full contents.
    '''
    cleaned_abstract_dumper_start_status_key = "Dumping the cleaned abstract .txt to the disc"
    status_logger(status_logger_name, cleaned_abstract_dumper_start_status_key)
    pre_new_cleaned_texts_folder = abstract_directory.split(".txt")[0]
    new_cleaned_texts_folder = open(pre_new_cleaned_texts_folder + "_"+"CLEANED.txt", 'w')
    for cleaned_text in cleaned_texts:
        new_cleaned_texts_folder.write(cleaned_text)
        new_cleaned_texts_folder.write('\n')
    # FIX: flush so the CLEANED file is complete even while the handle stays open.
    new_cleaned_texts_folder.flush()
    cleaned_abstract_dumper_end_status_key = "Dumped the cleaned abstract .txt to the disc"
    status_logger(status_logger_name, cleaned_abstract_dumper_end_status_key)
    return new_cleaned_texts_folder


def cleaner_main(abstract_directory, status_logger_name):
    '''Entry point: strips special-character tokens from the scraped abstracts.'''
    cleaner_main_start_status_key = "Entering the Cleaner module"
    status_logger(status_logger_name, cleaner_main_start_status_key)
    abstracts = txt_to_list(abstract_directory, status_logger_name)
    dirty_elements = dirty_element_generator(abstracts, status_logger_name)
    cleaned_texts = dirty_element_weeder(abstracts, dirty_elements, status_logger_name)
    new_cleaned_texts_folder = cleaned_abstract_dumper(abstract_directory, cleaned_texts, status_logger_name)
    '''Main contribution from this block of the code is the new cleaned .txt folder and cleaned abstracts. Just in case.'''
    cleaner_main_end_status_key = "Exiting the Cleaner module"
    status_logger(status_logger_name, cleaner_main_end_status_key)
    return cleaned_texts, new_cleaned_texts_folder
/pyResearchInsights/NLP_Engine.py
'''Hello! This module of code is a part of the larger pyResearchInsights project. This file was earlier named as
Temp_Gensim_Code; code is now bifurcated into Gensim code (this) and a seperate visualization code that will be added
to the repository as well.
Checkout the Bias README.md for an overview of the project.
Sarthak J. Shetty
24/11/2018'''

'''Natural Language toolkit. Here we download the commonly used English stopwords'''
import nltk; nltk.download('stopwords')
'''Standard set of functions for reading and appending files'''
import re
'''Pandas and numpy is a dependency used by other portions of the code.'''
import numpy as np
import pandas as pd
'''Think this stands for pretty print. Prints out stuff to the terminal in a prettier way'''
from pprint import pprint
'''Importing OS to get current working directory (cwd) to tackle abstracts_log_name edge cases'''
import os
'''Contains the language model that has to be developed.'''
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from pyResearchInsights.common_functions import status_logger
from pyResearchInsights.Visualizer import visualizer_main
'''Industrial level toolkit for NLP'''
import spacy
import pyLDAvis
import pyLDAvis.gensim_models
'''Make pretty visualizations'''
import matplotlib as plt
'''Library to log any errors. Came across this in the tutorial.'''
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from nltk.corpus import stopwords

# NLTK English stopwords plus project-specific boilerplate terms.
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'com', 'https', 'url', 'link', 'abstract', 'author',
        'chapter', 'springer', 'title', "the", "of", "and", "in", "to", "a", "is", "for", "from", "with", "that",
        "by", "are", "on", "was", "as", "were", "url:", "abstract:", "abstract", "author:", "title:", "at", "be",
        "an", "have", "this", "which", "study", "been", "not", "has", "its", "also", "these", "this", "can", "a",
        'it', 'their', "e.g.", "those", "had", "but", "while", "will", "when", "only", "author", "title", "there",
        "our", "did", "as", "if", "they", "such", "than", "no", "-", "could"])


def data_reader(abstracts_log_name, status_logger_name):
    '''Loads the abstracts file into a dataframe (one row per line).

    Prefers the *_CLEANED.txt produced by Cleaner.py; falls back to the given
    file so the NLP engine can also run standalone.
    '''
    data_reader_start_status_key = abstracts_log_name+".txt is being ported to dataframe"
    status_logger(status_logger_name, data_reader_start_status_key)
    try:
        abstracts_txt_file_name = (abstracts_log_name.split(".txt")[0]) + "_" + 'CLEANED.txt'
        textual_dataframe = pd.read_csv(abstracts_txt_file_name, delimiter="\t")
    except FileNotFoundError:
        textual_dataframe = pd.read_csv(abstracts_log_name, delimiter="\t")
    data_reader_end_status_key = abstracts_log_name + ".txt has been ported to dataframe"
    status_logger(status_logger_name, data_reader_end_status_key)
    return textual_dataframe


def textual_data_trimmer(textual_dataframe, status_logger_name):
    '''Converts each of the abstracts in the file into a list element.'''
    textual_data_trimmer_start_status_key = "Trimming data and preparing list of words"
    status_logger(status_logger_name, textual_data_trimmer_start_status_key)
    textual_data = textual_dataframe.values.tolist()
    textual_data_trimmer_end_status_key = "Trimmed data and prepared list of words"
    status_logger(status_logger_name, textual_data_trimmer_end_status_key)
    return textual_data


def sent_to_words(textual_data, status_logger_name):
    '''Generator: tokenizes each abstract with gensim's simple_preprocess
    (punctuation and accents removed).

    FIX: the original appended a recursive `list(sent_to_words(...))` tail
    after the yield loop, which recursed without bound once the generator was
    exhausted; that tail is removed.
    '''
    sent_to_words_start_status_key = "Tokenizing words"
    status_logger(status_logger_name, sent_to_words_start_status_key)
    for sentence in textual_data:
        yield gensim.utils.simple_preprocess(str(sentence), deacc=True)
    sent_to_words_end_status_key = "Tokenized words"
    status_logger(status_logger_name, sent_to_words_end_status_key)


def bigram_generator(textual_data, status_logger_name):
    '''Fits a Phrases model and returns its frozen Phraser.

    Bigrams: words that occur together with a high frequency.
    '''
    bigram_generator_start_status_key = "Generating word bigrams"
    status_logger(status_logger_name, bigram_generator_start_status_key)
    bigram = gensim.models.Phrases(textual_data, min_count=5, threshold=100)
    bigram_mod = gensim.models.phrases.Phraser(bigram)
    bigram_generator_end_status_key = "Generated word bigrams"
    status_logger(status_logger_name, bigram_generator_end_status_key)
    return bigram_mod


def remove_stopwords(textual_data, status_logger_name):
    '''Removes the extended stopword set from every document in the corpus.'''
    remove_stopwords_start_status_key = "Removing stopwords"
    status_logger(status_logger_name, remove_stopwords_start_status_key)
    filtered_data = [[word for word in simple_preprocess(str(doc)) if word not in stop_words]
                     for doc in textual_data]
    # FIX: this end-of-stage log previously sat *after* the return and never ran.
    remove_stopwords_end_status_key = "Removed stopwords"
    status_logger(status_logger_name, remove_stopwords_end_status_key)
    return filtered_data


def format_topics_sentences(ldamodel, corpus, texts):
    '''Builds a dataframe with the dominant topic, its contribution and its
    keywords for each document, alongside the document text.

    FIX: rewritten to collect rows in a list instead of DataFrame.append,
    which was deprecated and removed in pandas >= 2.0.
    '''
    rows = []
    for i, row in enumerate(ldamodel[corpus]):
        # Sort this document's topics by contribution, strongest first.
        row = sorted(row, key=lambda x: (x[1]), reverse=True)
        if row:
            topic_num, prop_topic = row[0]
            wp = ldamodel.show_topic(topic_num)
            topic_keywords = ", ".join([word for word, prop in wp])
            rows.append([int(topic_num), round(prop_topic, 4), topic_keywords])
    sent_topics_df = pd.DataFrame(rows, columns=['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords'])
    contents = pd.Series(texts)
    sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
    return (sent_topics_df)


def make_bigrams(textual_data, status_logger_name):
    '''Applies the fitted bigram model to every document in the corpus.'''
    make_bigrams_start_status_key = "Generating bigrams"
    status_logger(status_logger_name, make_bigrams_start_status_key)
    bigram_mod = bigram_generator(textual_data, status_logger_name)
    bigrammed = [bigram_mod[doc] for doc in textual_data]
    # FIX: this end-of-stage log previously sat *after* the return and never ran.
    make_bigrams_end_status_key = "Generated bigrams"
    status_logger(status_logger_name, make_bigrams_end_status_key)
    return bigrammed


def lemmatization(status_logger_name, textual_data, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    '''Reduces each word to its root form (Running -> Run), keeping only the
    allowed parts of speech. Downloads the spaCy model on first use.'''
    lemmatization_start_status_key = "Beginning lemmatization"
    status_logger(status_logger_name, lemmatization_start_status_key)
    texts_out = []
    try:
        nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
    except OSError:
        from spacy.cli import download
        download('en_core_web_sm')
        nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
    for sent in textual_data:
        doc = nlp(" ".join(sent))
        texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
    lemmatization_end_status_key = "Ending lemmatization"
    status_logger(status_logger_name, lemmatization_end_status_key)
    return texts_out


def nlp_engine_main(abstracts_log_name, status_logger_name, num_topics=None, num_keywords=None, mallet_path=None):
    '''Pipeline entry point: reads abstracts, preprocesses them, fits an LDA
    model (Mallet-backed if mallet_path is given) and hands it to the
    Visualizer. Returns 0 on completion.'''
    nlp_engine_main_start_status_key = "Initiating the NLP Engine"
    status_logger(status_logger_name, nlp_engine_main_start_status_key)
    '''We can arrive at logs_folder_name from abstracts_log_name, instead of passing it to the NLP_Engine function each time'''
    if('Abstract' in abstracts_log_name):
        logs_folder_name = abstracts_log_name.split('Abstract')[0][:-1]
    else:
        logs_folder_name = ''
    if(logs_folder_name == ''):
        '''This condition is required, if the file is located at the directory of the pyResearchInsights code.'''
        logs_folder_name = logs_folder_name + os.getcwd()
    '''Declaring the number of topics to be generated by the LDA model'''
    if num_topics == None:
        num_topics = 10
    '''Declaring the number of keywords to be presented by the Visualizer'''
    if num_keywords == None:
        num_keywords = 20
    '''Extracts the data from the .txt file and puts them into a Pandas dataframe buckets'''
    textual_dataframe = data_reader(abstracts_log_name, status_logger_name)
    '''Rids the symbols and special characters from the textual_data'''
    textual_data = textual_data_trimmer(textual_dataframe, status_logger_name)
    '''Removes stopwords that were earlier downloaded from the textual_data'''
    textual_data_no_stops = remove_stopwords(textual_data, status_logger_name)
    '''Prepares bigrams'''
    textual_data_words_bigrams = make_bigrams(textual_data_no_stops, status_logger_name)
    '''Lemmatization: Running -> Run'''
    textual_data_lemmatized = lemmatization(status_logger_name, textual_data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
    '''Creating a dictionary for each term as the key, and the value as their frequency in that sentence.'''
    id2word = corpora.Dictionary(textual_data_lemmatized)
    texts = textual_data_lemmatized
    '''Creating a dictionary for the entire corpus and not just individual abstracts and documents.'''
    corpus = [id2word.doc2bow(text) for text in texts]
    lda_model_generation_start_status_key = "Generating the LDA model using default parameter set"
    status_logger(status_logger_name, lda_model_generation_start_status_key)
    if(mallet_path):
        # NOTE(review): gensim.models.wrappers was removed in gensim 4.x; the
        # mallet path requires gensim < 4. Confirm the pinned dependency.
        lda_model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=id2word)
        '''Generating a dataset that maps each document to its dominant topic'''
        df_topic_sents_keywords = format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=textual_data)
        df_dominant_topic = df_topic_sents_keywords.reset_index()
        df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
        df_dominant_topic.to_csv(logs_folder_name + '/Master_Topic_Per_Sentence.csv')
        '''Generating a dataset to present the percentage of papers under each topic, their keywords and number of papers'''
        sent_topics_sorteddf_mallet = pd.DataFrame()
        sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')
        for i, grp in sent_topics_outdf_grpd:
            sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet,
                grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)], axis=0)
        sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
        topic_counts = df_topic_sents_keywords['Dominant_Topic'].value_counts()
        topic_contribution = round(topic_counts/topic_counts.sum(), 4)
        sent_topics_sorteddf_mallet.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"]
        sent_topics_sorteddf_mallet.head()
        sent_topics_sorteddf_mallet['Number_Papers'] = [topic_counts[count] for count in range(num_topics)]
        sent_topics_sorteddf_mallet['Percentage_Papers'] = [topic_contribution[count] for count in range(0, num_topics)]
        sent_topics_sorteddf_mallet.to_csv(logs_folder_name+'/Master_Topics_Contribution.csv')
        '''Converting the mallet model to LDA for use by the Visualizer code'''
        lda_model = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(lda_model)
    else:
        lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
            id2word=id2word,
            num_topics=num_topics,
            random_state=100,
            update_every=1,
            chunksize=100,
            passes=10,
            alpha='auto',
            per_word_topics=True)
    lda_model_generation_end_status_key = "Generated the LDA model using default parameter set"
    status_logger(status_logger_name, lda_model_generation_end_status_key)
    perplexity_score = lda_model.log_perplexity(corpus)
    perplexity_status_key = "Issued perplexity:"+" "+str(perplexity_score)
    status_logger(status_logger_name, perplexity_status_key)
    nlp_engine_main_end_status_key = "Idling the NLP Engine"
    status_logger(status_logger_name, nlp_engine_main_end_status_key)
    '''Importing the visualizer_main function to view the LDA Model built by the NLP_engine_main() function'''
    visualizer_main(lda_model, corpus, id2word, textual_data_lemmatized, num_topics, num_keywords, logs_folder_name, status_logger_name)
    return 0
/pyResearchInsights/Scraper.py
'''The aim of this script is to scrape abstracts, author names and date of publication from Springer
Sarthak J. Shetty
04/08/2018'''

'''urllib.request provides the cookie-aware opener used to fetch pages'''
from urllib.request import build_opener, HTTPCookieProcessor
'''urllib.error is needed to catch HTTP errors raised while pinging pages'''
import urllib.error
'''BeautifulSoup is used for souping the scraped pages'''
from bs4 import BeautifulSoup as bs
'''Counter builds the year -> frequency dictionary from the abstract data'''
from collections import Counter
'''csv is used to dump the year dictionary for further analysis and error checking'''
import csv
'''numpy supplies the random integer used by delay_function (see below)'''
import numpy as np
'''time.sleep spaces out remote-server pings so the connection is not shut while
scraping extraordinarily large datasets'''
import time
'''Functions shared across the different sub-parts of the project'''
from pyResearchInsights.common_functions import pre_processing, argument_formatter, keyword_url_generator, abstract_id_log_name_generator, status_logger

def url_reader(url, status_logger_name):
	'''Opens url with a cookie-aware opener and returns the response handle.
	Some links fail with HTTP errors; those are swallowed (returning None
	implicitly) so the caller can skip that PII number and move on.'''
	try:
		open_connection = build_opener(HTTPCookieProcessor())
		return open_connection.open(url)
	except (UnboundLocalError, urllib.error.HTTPError):
		pass

def results_determiner(url, status_logger_name):
	'''Reads the total number of results a keyword (or combination of keywords)
	returns on link.springer.com and logs that count.'''
	first_page_to_scrape = url_reader(url, status_logger_name)
	first_page_to_scrape_soup = page_souper(first_page_to_scrape, status_logger_name)
	number_of_results = first_page_to_scrape_soup.find('h1', {'id':'number-of-search-results-and-search-terms'}).find('strong').text
	results_determiner_status_key = "Total number of results obtained: " + number_of_results
	status_logger(status_logger_name, results_determiner_status_key)

def url_generator(start_url, query_string, status_logger_name):
	'''Builds the full list of result-page URLs for a topic. The page count is read
	off the first results page ("number-of-pages" element) and one URL is produced
	per page.'''
	url_generator_start_status_key = start_url + " start_url has been received"
	status_logger(status_logger_name, url_generator_start_status_key)
	'''Query-string filters restricting the results to English-language Articles'''
	search_filters = "?facet-content-type=\"Article\"&query=" + query_string + "&facet-language=\"En\""
	total_url = start_url + str(0) + search_filters
	initial_url_status_key = total_url + " has been obtained"
	status_logger(status_logger_name, initial_url_status_key)
	'''Souping page 0 just to read how many result pages exist in total'''
	test_soup = bs(url_reader(total_url, status_logger_name), 'html.parser')
	determiner = test_soup.findAll('span', {'class':'number-of-pages'})[0].text
	page_count = int(determiner.replace(',', ''))
	'''One URL per result page, numbered 1..page_count'''
	urls_to_scrape = [start_url + str(page_number) + search_filters for page_number in range(1, page_count + 1)]
	url_generator_stop_status_key = determiner.replace(',', '') + " page URLs have been obtained"
	status_logger(status_logger_name, url_generator_stop_status_key)
	return urls_to_scrape

def page_souper(page, status_logger_name):
	'''Soups the webpage elements so that tags can be searched.
	Note: appropriate encoding has to be picked up before souping.'''
	page_souper_start_status_key = "Souping page"
	status_logger(status_logger_name, page_souper_start_status_key)
	page_soup = bs(page, 'html.parser')
	page_souper_stop_status_key = "Souped page"
	status_logger(status_logger_name, page_souper_stop_status_key)
	return page_soup

def abstract_word_extractor(abstract, abstract_title, abstract_year, permanent_word_sorter_list, trend_keywords, status_logger_name):
	'''Records, for the first trend keyword, the year of every abstract in which it
	appears, by appending that year to the permanent word sorter list.'''
	abstract_word_sorter_start_status_key = "Adding: " + abstract_title + " to the archival list"
	status_logger(status_logger_name, abstract_word_sorter_start_status_key)
	'''Lower-casing the abstract and splitting it into a sorted list of words;
	working with a list is easier here than working with dataframes'''
	abstract_word_list = abstract.lower().split()
	abstract_word_list.sort()
	'''Each occurrence of the trend keyword contributes the abstract's year (first 4 characters)'''
	for word in abstract_word_list:
		if(word == trend_keywords[0]):
			permanent_word_sorter_list.append(abstract_year[:4])
	abstract_word_sorter_end_status_key = "Added: " + abstract_title + " to the archival list"
	status_logger(status_logger_name, abstract_word_sorter_end_status_key)

def abstract_year_list_post_processor(permanent_word_sorter_list, status_logger_name):
	'''Collapses the archival list into a dictionary mapping each year to the
	frequency of occurrence of the trend keyword in that year.'''
	abstract_year_list_post_processor_start_status_key = "Post processing of permanent word sorter list has commenced"
	status_logger(status_logger_name, abstract_year_list_post_processor_start_status_key)
	abstract_year_dictionary = Counter(permanent_word_sorter_list)
	abstract_year_list_post_processor_end_status_key = "Post processing of permanent word sorter list has completed"
	status_logger(status_logger_name, abstract_year_list_post_processor_end_status_key)
	return abstract_year_dictionary

def abstract_year_dictionary_dumper(abstract_word_dictionary, abstracts_log_name, status_logger_name):
	'''Saves the year -> frequency dictionary to the disc as a CSV bucket for
	further inspection.'''
	permanent_word_sorter_list_start_status_key = "Dumping the entire dictionary to the disc"
	status_logger(status_logger_name, permanent_word_sorter_list_start_status_key)
	with open(abstracts_log_name + "_DICTIONARY.csv", 'w') as dictionary_to_csv:
		csv_writer = csv.writer(dictionary_to_csv)
		for year, frequency in abstract_word_dictionary.items():
			csv_writer.writerow([year, frequency])
	permanent_word_sorter_list_end_status_key = "Dumped the entire dictionary to the disc"
	status_logger(status_logger_name, permanent_word_sorter_list_end_status_key)

def abstract_page_scraper(abstract_url, abstract_input_tag_id, abstracts_log_name, permanent_word_sorter_list, site_url_index, status_logger_name):
	'''Fetches one abstract page (abstract_url + ID), soups it and writes the
	title, author, date and abstract text to the log files on the disc.'''
	abstract_page_scraper_status_key = "Abstract ID: " + abstract_input_tag_id
	status_logger(status_logger_name, abstract_page_scraper_status_key)
	abstract_page_url = abstract_url + abstract_input_tag_id
	abstract_page = url_reader(abstract_page_url, status_logger_name)
	abstract_soup = page_souper(abstract_page, status_logger_name)
	title = title_scraper(abstract_soup, status_logger_name)
	abstract_date = abstract_date_scraper(title, abstract_soup, status_logger_name)
	'''Author and abstract elements are missing often enough that each gets its own failsafe'''
	try:
		author = author_scraper(abstract_soup, status_logger_name)
	except AttributeError:
		author = "Author not available"
	try:
		abstract = abstract_scraper(abstract_soup)
		# abstract_word_extractor(abstract, title, abstract_date, permanent_word_sorter_list, trend_keywords, status_logger_name)
	except AttributeError:
		abstract = "Abstract not available"
	abstract_database_writer(abstract_page_url, title, author, abstract, abstracts_log_name, abstract_date, status_logger_name)
	analytical_abstract_database_writer(title, author, abstract, abstracts_log_name, status_logger_name)

def abstract_crawler(abstract_url, abstract_id_log_name, abstracts_log_name, permanent_word_sorter_list, site_url_index, status_logger_name):
	'''Crawls one result page's worth of abstract ids and scrapes each abstract;
	ids that cannot be processed are logged and skipped.'''
	abstract_crawler_start_status_key = "Entered the Abstract Crawler"
	status_logger(status_logger_name, abstract_crawler_start_status_key)
	abstract_input_tag_ids = abstract_id_database_reader(abstract_id_log_name, site_url_index, status_logger_name)
	for abstract_input_tag_id in abstract_input_tag_ids:
		try:
			'''Springer lists 20 abstracts per page, hence the site_url_index * 20 offset'''
			abstract_crawler_accept_status_key = "Abstract Number: " + str((abstract_input_tag_ids.index(abstract_input_tag_id) + 1) + site_url_index * 20)
			status_logger(status_logger_name, abstract_crawler_accept_status_key)
			abstract_page_scraper(abstract_url, abstract_input_tag_id, abstracts_log_name, permanent_word_sorter_list, site_url_index, status_logger_name)
		except TypeError:
			abstract_crawler_reject_status_key = "Abstract Number: " + str(abstract_input_tag_ids.index(abstract_input_tag_id) + 1) + " could not be processed"
			status_logger(status_logger_name, abstract_crawler_reject_status_key)
	abstract_crawler_end_status_key = "Exiting the Abstract Crawler"
	status_logger(status_logger_name, abstract_crawler_end_status_key)

def analytical_abstract_database_writer(title, author, abstract, abstracts_log_name, status_logger_name):
	'''Writes the abstract text alone to a secondary file. This analytical file is
	the one passed to the Visualizer and Analyzer, as opposed to the complete log
	file which contains plenty of non-abstract text as well.'''
	analytical_abstract_database_writer_start_status_key = "Writing " + title + " by " + author + " to analytical abstracts file"
	status_logger(status_logger_name, analytical_abstract_database_writer_start_status_key)
	with open(abstracts_log_name + '_ANALYTICAL.txt', 'a') as analytical_abstracts_txt_log:
		analytical_abstracts_txt_log.write(abstract)
		analytical_abstracts_txt_log.write('\n\n')
	analytical_abstract_database_writer_stop_status_key = "Written " + title + " to disc"
	status_logger(status_logger_name, analytical_abstract_database_writer_stop_status_key)

def abstract_database_writer(abstract_page_url, title, author, abstract, abstracts_log_name, abstract_date, status_logger_name):
	'''Appends one paper's Title, Author(s), Date, URL and Abstract to the .txt log,
	and the abstract alone to the .csv log, for future reference.'''
	abstract_database_writer_start_status_key = "Writing " + title + " by " + author + " to disc"
	status_logger(status_logger_name, abstract_database_writer_start_status_key)
	with open(abstracts_log_name + '.txt', 'a') as abstracts_txt_log:
		abstracts_txt_log.write("Title: " + title + '\n')
		abstracts_txt_log.write("Author: " + author + '\n')
		abstracts_txt_log.write("Date: " + abstract_date + '\n')
		abstracts_txt_log.write("URL: " + abstract_page_url + '\n')
		abstracts_txt_log.write("Abstract: " + abstract)
		abstracts_txt_log.write('\n\n')
	with open(abstracts_log_name + '.csv', 'a') as abstracts_csv_log:
		abstracts_csv_log.write(abstract)
		abstracts_csv_log.write('\n')
	abstract_database_writer_stop_status_key = "Written " + title + " to disc"
	status_logger(status_logger_name, abstract_database_writer_stop_status_key)

def abstract_id_database_reader(abstract_id_log_name, site_url_index, status_logger_name):
	'''Reads back the abstract ids logged for one result page (file names are
	1-indexed, hence the +1) and returns them as a list of strings.'''
	abstract_id_database_reader_start_status_key = "Extracting Abstract IDs from disc"
	status_logger(status_logger_name, abstract_id_database_reader_start_status_key)
	with open(abstract_id_log_name + str(site_url_index + 1) + '.txt') as abstract_id_log:
		lines_in_abstract_id_database = [line.rstrip('\n') for line in abstract_id_log]
	abstract_id_database_reader_stop_status_key = "Extracted Abstract IDs from disc"
	status_logger(status_logger_name, abstract_id_database_reader_stop_status_key)
	return lines_in_abstract_id_database

def abstract_id_database_writer(abstract_id_log_name, abstract_input_tag_id, site_url_index):
	'''Appends one abstract id to the per-page .txt log for easy access and documentation.'''
	with open(abstract_id_log_name + str(site_url_index + 1) + '.txt', 'a') as abstract_id_log:
		abstract_id_log.write(abstract_input_tag_id)
		abstract_id_log.write('\n')

def abstract_date_scraper(title, abstract_soup, status_logger_name):
	'''Scrapes the publication date of an abstract; plays a crucial role in the
	trends functionality built on top of the scraped data.'''
	date_scraper_entry_status_key = "Scraping date of the abstract titled: " + title
	status_logger(status_logger_name, date_scraper_entry_status_key)
	try:
		abstract_date = abstract_soup.find('time').get('datetime')
		date_scraper_exit_status_key = title + " was published on " + abstract_date
	except AttributeError:
		abstract_date = "Date for abstract titled: " + title + " was not available"
		date_scraper_exit_status_key = abstract_date
	status_logger(status_logger_name, date_scraper_exit_status_key)
	return abstract_date

def abstract_scraper(abstract_soup):
	'''Scrapes the abstract text from the soup and returns it to the page scraper.'''
	return str(abstract_soup.find('div', {'id':'Abs1-content'}).text.encode('utf-8'))[1:]

def author_scraper(abstract_soup, status_logger_name):
	'''Scrapes the author(s) of the paper, for easy navigation and search. The
	author list elements are concatenated into a single string for storage.'''
	author_scraper_start_status_key = "Scraping the author name"
	status_logger(status_logger_name, author_scraper_start_status_key)
	author_elements = abstract_soup.findAll('li', {'class':'c-author-list__item'})
	author = ''.join(str(author_element.text) for author_element in author_elements)
	author_scraper_end_status_key = "Scraped the author's name: " + str(author)
	status_logger(status_logger_name, author_scraper_end_status_key)
	return author

def title_scraper(abstract_soup, status_logger_name):
	'''Scrapes the title from the abstract soup, falling back through the tags used
	by incorrectly classified assets (Journal/Chapter as opposed to Article).'''
	title_scraper_start_status_key = "Scraping the title of the abstract"
	status_logger(status_logger_name, title_scraper_start_status_key)
	try:
		title = str(abstract_soup.find('h1', {'class':'c-article-title'}).text.encode('utf-8'))[1:]
	except AttributeError:
		try:
			title = str(abstract_soup.find('h1',{'class':'ChapterTitle'}).text.encode('utf-8'))[1:]
		except AttributeError:
			try:
				title = (abstract_soup.find('span', {'class':'JournalTitle'}).text)
			except AttributeError:
				title = "Title not available"
	title_scraper_end_status_key = "Scraped the title of the abstract"
	status_logger(status_logger_name, title_scraper_end_status_key)
	return title

def abstract_id_scraper(abstract_id_log_name, page_soup, site_url_index, status_logger_name):
	'''Collects the PII number of every abstract on a result page; each id is later
	coupled with the dynamic URL to reach the abstract itself.'''
	abstract_id_scraper_start_status_key = "Scraping IDs"
	status_logger(status_logger_name, abstract_id_scraper_start_status_key)
	'''The input tags of class "title" carry the abstract ids in their href attribute'''
	for abstract_input_tag in page_soup.findAll('a', {'class':'title'}):
		abstract_input_tag_id = abstract_input_tag.get('href')
		abstract_id_database_writer(abstract_id_log_name, abstract_input_tag_id, site_url_index)
	abstract_id_scraper_stop_status_key = "Scraped IDs"
	status_logger(status_logger_name, abstract_id_scraper_stop_status_key)

def word_sorter_list_generator(status_logger_name):
	'''Creates the permanent archival list (words vs years) before the actual
	scraping recursion begins.'''
	word_sorter_list_generator_start_status_key = "Generating the permanent archival list"
	status_logger(status_logger_name, word_sorter_list_generator_start_status_key)
	word_sorter_list = []
	word_sorter_list_generator_exit_status_key = "Generated the permanent archival list"
	status_logger(status_logger_name, word_sorter_list_generator_exit_status_key)
	return word_sorter_list

def delay_function(status_logger_name):
	'''Sleeps for a random 0-19 second interval between page scrapes, reducing the
	ping rate so the Springer servers do not shut down the remote connection.'''
	delay_variable = np.random.randint(0, 20)
	delay_function_start_status_key = "Delaying remote server ping: " + str(delay_variable) + " seconds"
	status_logger(status_logger_name, delay_function_start_status_key)
	time.sleep(delay_variable)
	delay_function_end_status_key = "Delayed remote server ping: " + str(delay_variable) + " seconds"
	status_logger(status_logger_name, delay_function_end_status_key)

def processor(abstract_url, urls_to_scrape, abstract_id_log_name, abstracts_log_name, status_logger_name, keywords_to_search):
	'''Cycles through every result page returned from Springer: soups each page,
	extracts the abstract ids on it and then crawls the abstracts themselves.
	Returns the year -> frequency dictionary built from the scraped data.'''
	permanent_word_sorter_list = word_sorter_list_generator(status_logger_name)
	for site_url_index, site_url in enumerate(urls_to_scrape):
		if(site_url_index == 0):
			results_determiner(site_url, status_logger_name)
		'''Collecting and souping the web page, then extracting the ids and abstracts on it'''
		page_to_soup = url_reader(site_url, status_logger_name)
		page_soup = page_souper(page_to_soup, status_logger_name)
		abstract_id_scraper(abstract_id_log_name, page_soup, site_url_index, status_logger_name)
		abstract_crawler(abstract_url, abstract_id_log_name, abstracts_log_name, permanent_word_sorter_list, site_url_index, status_logger_name)
		'''Delaying after each page being scraped, rather than after each abstract'''
		delay_function(status_logger_name)
	abstract_year_dictionary = abstract_year_list_post_processor(permanent_word_sorter_list, status_logger_name)
	return abstract_year_dictionary

def scraper_main(keywords_to_search, abstracts_log_name, status_logger_name):
	'''Entry point of the Scraper: builds the URLs from the user's keywords, runs
	the processor over every result page and dumps the year dictionary to disc.'''
	start_url, abstract_url, query_string = keyword_url_generator(keywords_to_search)
	'''Only the abstracts_log_name is received, so the abstract_id_log_name has to be derived from it'''
	abstract_id_log_name = abstract_id_log_name_generator(abstracts_log_name)
	if(type(keywords_to_search) == str):
		'''When the code is run straight from the library function, the keywords arrive
		as a plain string and need to be converted to a list of words'''
		keywords_to_search = argument_formatter(keywords_to_search)
	'''Links for the URLs to be scraped by the scraper'''
	urls_to_scrape = url_generator(start_url, query_string, status_logger_name)
	abstract_year_dictionary = processor(abstract_url, urls_to_scrape, abstract_id_log_name, abstracts_log_name, status_logger_name, keywords_to_search)
	'''Dumping the entire dictionary onto the disc for further analysis and inference'''
	abstract_year_dictionary_dumper(abstract_year_dictionary, abstracts_log_name, status_logger_name)
	return 0
/pyResearchInsights/Visualizer.py
'''Hello! This code is part of the pyResearchInsights project.
We will be displaying the results from the NLP_Engine.py code here, primarily using the pyLDAvis library.
Check out the repository README.md for a high-level overview of the project and the objective.
Sarthak J. Shetty
24/11/2018'''

from pyResearchInsights.common_functions import argument_formatter, status_logger
'''matplotlib draws the histograms and trends charts'''
import matplotlib.pyplot as plt
'''pyLDAvis develops the html visualizations; the gensim_models submodule must be
imported explicitly for pyLDAvis.gensim_models.prepare to resolve'''
import pyLDAvis
import pyLDAvis.gensim_models
'''Counter generates the keyword-frequency dictionary from the lemmatized textual_data'''
from collections import Counter
'''pandas builds the dataframes used to plot the histograms'''
import pandas as pd
'''cm supplies the colormap shared by the per-topic charts'''
from matplotlib import cm

def visualizer_generator(lda_model, corpus, id2word, logs_folder_name, status_logger_name):
	'''Generates the .html topic-modelling visualization of the prepared data and
	returns the topic order used by that visualization.

	lda_model: trained gensim LDA model
	corpus: bag-of-words corpus the model was trained on
	id2word: gensim Dictionary mapping token ids to words
	logs_folder_name: folder the .html file is saved to
	status_logger_name: name of the status log file'''
	visualizer_generator_start_status_key = "Preparing the topic modeling visualization"
	status_logger(status_logger_name, visualizer_generator_start_status_key)
	'''Building the actual topic-modelling visualization from the model'''
	textual_data_visualization = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)
	pyLDAvis.save_html(textual_data_visualization, logs_folder_name+"/"+"Data_Visualization_Topic_Modelling.html")
	'''Topic ordering according to the LDA visualization, so the charts that follow match its numbering'''
	topic_order = [textual_data_visualization[0].iloc[topic].name for topic in range(lda_model.num_topics)]
	'''Bug fix: this end-status used to sit after the return statement and never ran'''
	visualizer_generator_end_status_key = "Prepared the topic modeling visualization"+" "+logs_folder_name+"/"+"Data_Visualization_Topic_Modelling.html"
	status_logger(status_logger_name, visualizer_generator_end_status_key)
	return topic_order

def topic_builder(lda_model, topic_order, num_topics, num_keywords, textual_data_lemmatized, logs_folder_name, status_logger_name):
	'''Generates one chart per topic presenting the frequency and weight of each of
	the topic's keywords, and saves the charts to the disc for further analysis.

	lda_model: trained gensim LDA model
	topic_order: topic ordering returned by visualizer_generator()
	num_topics / num_keywords: number of topics, and of keywords charted per topic
	textual_data_lemmatized: lemmatized documents used for the corpus-wide word counts
	logs_folder_name: folder the .png charts are saved to
	status_logger_name: name of the status log file'''
	topic_builder_start_status_key = "Preparing the frequency and weights vs keywords charts"
	status_logger(status_logger_name, topic_builder_start_status_key)
	'''One colormap entry per topic so every chart gets its own color'''
	colorchart = cm.get_cmap('plasma', num_topics)
	topics = lda_model.show_topics(num_topics = -1, num_words = num_keywords, formatted=False)
	data_flat = [w for w_list in textual_data_lemmatized for w in w_list]
	counter = Counter(data_flat)
	'''Dataframe holding word, topic_id, importance (weight) and corpus-wide word_count'''
	out = []
	for i, topic in topics:
		for word, weight in topic:
			out.append([word, i, weight, counter[word]])
	df = pd.DataFrame(out, columns=['word', 'topic_id', 'importance', 'word_count'])
	for topic in topic_order:
		'''Progressively generating the figures comprising the weights and frequencies of each keyword'''
		_, ax = plt.subplots(1, 1, figsize=[20, 15])
		x_axis = [x_axis_element for x_axis_element in range(0, num_keywords)]
		'''The x axis labels are the topic keywords themselves'''
		x_axis_labels = [element for element in df.loc[df.topic_id==topic, 'word']]
		y_axis = [round(element, 2) for element in df.loc[df.topic_id==topic, 'word_count']]
		'''10 equally spaced y axis labels for the word counts'''
		word_count_list = [word_count for word_count in df.loc[df.topic_id==topic, 'word_count']]
		word_count_increment = (max(word_count_list)/10)
		y_axis_labels = [round(0 + increment*(word_count_increment)) for increment in range(0, 10)]
		'''10 equally spaced labels for the twin (weight) axis as well'''
		word_importance_list = [word_count for word_count in df.loc[df.topic_id==topic, 'importance']]
		word_importance_increment = (max(word_importance_list)/10)
		y_axis_twin_labels = [0 + increment*(word_importance_increment) for increment in range(0, 10)]
		plt.xticks(x_axis, x_axis_labels, rotation=40, horizontalalignment='right', fontsize = 25)
		ax.bar(x_axis, y_axis, width=0.5, alpha=0.3, color=colorchart.colors[topic], label="Word Count")
		ax.set_yticks(y_axis_labels)
		ax.tick_params(axis = 'y', labelsize = 25)
		ax.set_ylabel('Word Count', color=colorchart.colors[topic], fontsize = 25)
		ax.legend(loc='upper left', fontsize = 20)
		'''Second set of barplots: keyword weights on a twin axis'''
		ax_twin = ax.twinx()
		ax_twin.bar(x_axis, df.loc[df.topic_id==topic, 'importance'], width=0.2, color=colorchart.colors[topic], label = "Weight")
		ax_twin.set_ylabel('Weight', color=colorchart.colors[topic], fontsize = 25)
		ax_twin.set_yticks(y_axis_twin_labels)
		ax_twin.tick_params(axis='y', labelsize = 25)
		ax_twin.legend(loc='upper right', fontsize = 20)
		plt.title('Topic Number: '+str(topic_order.index(topic) + 1), color=colorchart.colors[topic], fontsize=25)
		'''Saving each of the charts generated to the disc'''
		plt.savefig(logs_folder_name + '/FrequencyWeightChart_TopicNumber_' + str(topic_order.index(topic) + 1) + '.png')
	topic_builder_end_status_key = "Prepared the frequency and weights vs keywords charts"
	status_logger(status_logger_name, topic_builder_end_status_key)

def trends_histogram(abstracts_log_name, logs_folder_name, trend_keywords, status_logger_name):
	'''Generates the graph visualizing the year-on-year frequency of the first trend
	keyword, from the _DICTIONARY.csv dumped by the Scraper.

	How it works:
	a) the year -> frequency dictionary dumped by the Scraper is read back off the disc;
	b) the span of years between the earliest and latest appearance is reconstructed;
	c) years in which the keyword never appears are assigned a frequency of zero;
	d) the two lists (years and frequencies) are plotted and the figure is saved.'''
	trends_histogram_start_status_key = "Generating the trends histogram"
	status_logger(status_logger_name, trends_histogram_start_status_key)
	list_of_years=[]
	list_of_frequencies = []
	'''Reading back the dictionary data dumped by the Scraper code; the context
	manager closes the handle (it was previously left open)'''
	with open(abstracts_log_name + '_DICTIONARY.csv', 'r') as abstract_word_dictionary_file:
		for line in abstract_word_dictionary_file:
			list_of_years.append(int(line.split(',')[0]))
			list_of_frequencies.append(int(line.split(',')[1][:-1]))
	'''Tabulating the first and last years in which the trend keyword appears'''
	starting_year = min(list_of_years)
	ending_year = max(list_of_years)
	'''Recreating the actual dictionary here'''
	abstract_word_dictionary = {list_of_years[year]:list_of_frequencies[year] for year in range(0, len(list_of_years))}
	'''Generating a continuous list of years to be plotted from the abstracts collected'''
	list_of_years_to_be_plotted = [year for year in range((starting_year), (ending_year)+1)]
	frequencies_to_be_plotted = []
	'''Corresponding frequency for each year; absent years count as zero'''
	for year in range(starting_year, ending_year+1):
		try:
			frequencies_to_be_plotted.append(abstract_word_dictionary[year])
		except KeyError:
			frequencies_to_be_plotted.append(0)
	'''Y ticks: one tick per frequency value up to the largest frequency plotted'''
	y_ticks_frequency = []
	max_frequency_value = max(frequencies_to_be_plotted)
	for frequency_element in range(0, max_frequency_value+1):
		y_ticks_frequency.append(frequency_element)
	'''Sizing the figure to accommodate the entire trends graph generated'''
	plt.figure(figsize=[15,10])
	plt.plot(list_of_years_to_be_plotted, frequencies_to_be_plotted)
	'''Plotting the frequencies again to make the frequency pivots visible'''
	plt.plot(list_of_years_to_be_plotted, frequencies_to_be_plotted, 'ro')
	'''Labeling each non-zero frequency for readability, instead of second-guessing Y axis values'''
	for element in range(0, len(list_of_years_to_be_plotted)):
		if(frequencies_to_be_plotted[element]!=0):
			plt.text(list_of_years_to_be_plotted[element], frequencies_to_be_plotted[element], "Frequency: "+str(frequencies_to_be_plotted[element]), bbox=dict(facecolor='orange', alpha=0.3), horizontalalignment='right', verticalalignment='top',size=8)
	plt.ylabel("Frequency of occurrence:"+" "+trend_keywords[0])
	plt.xlabel("Year of occurrence:"+" "+trend_keywords[0])
	plt.title("Trends Chart:"+" "+trend_keywords[0])
	'''Every year plotted along the x axis, rotated for better readability'''
	plt.xticks(list_of_years_to_be_plotted, rotation=45)
	plt.yticks(y_ticks_frequency)
	'''Saving the graph generated to the disc for further analysis'''
	plt.savefig(logs_folder_name+"/"+"Data_Visualization_Trends_Graph"+"_"+trend_keywords[0]+".png")
	trends_histogram_end_status_key = "Generated the trends graph"+" "+logs_folder_name+"/"+"Data_Visualization_Trends_Graph"+"_"+trend_keywords[0]+".png"
	status_logger(status_logger_name, trends_histogram_end_status_key)

def visualizer_main(lda_model, corpus, id2word, textual_data_lemmatized, num_topics, num_keywords, logs_folder_name, status_logger_name):
	'''Main Visualizer entry point: builds the pyLDAvis html and the per-topic
	frequency/weight charts. Kept modular so each stage can be reused on its own.'''
	visualizer_main_start_status_key = "Entering the visualizer_main() code"
	status_logger(status_logger_name, visualizer_main_start_status_key)
	topic_order = visualizer_generator(lda_model, corpus, id2word, logs_folder_name, status_logger_name)
	'''Histograms presenting the frequency and weights of the keywords of each topic'''
	topic_builder(lda_model, topic_order, num_topics, num_keywords, textual_data_lemmatized, logs_folder_name, status_logger_name)
	visualizer_main_end_status_key = "Exiting the visualizer_main() code"
	status_logger(status_logger_name, visualizer_main_end_status_key)
/pyResearchInsights/__init__.py
from pyResearchInsights.Scraper import scraper_main from pyResearchInsights.Cleaner import cleaner_main from pyResearchInsights.Analyzer import analyzer_main from pyResearchInsights.NLP_Engine import nlp_engine_main from pyResearchInsights.common_functions import pre_processing, arguments_parser, end_process from pyResearchInsights.system_functions import tarballer, rm_original_folder
/pyResearchInsights/common_functions.py
'''Hello! This script contains functions that are reused by other pieces of code and scripts
belonging to this project as well. Checkout the README.md for more details regarding the
project itself.
Sarthak J Shetty
12/09/2018'''

'''datetime is used while building the database logs'''
from datetime import datetime
'''Importing OS functions to build the folders for the LOG run here as well'''
import os
'''Importing argparse to parse the keywords, then supplied to the Scraper.py code'''
import argparse

def status_logger(status_logger_name, status_key):
	'''Prints a timestamped status message and appends it to the session log file.
	status_logger_name: path of the log file, without the ".txt" suffix.
	status_key: the message to record.'''
	current_hour = str(datetime.now().time().hour)
	current_minute = str(datetime.now().time().minute)
	current_second = str(datetime.now().time().second)
	'''Logging the complete_status_key and printing the complete_status_key'''
	complete_status_key = "[INFO]"+current_hour+":"+current_minute+":"+current_second+" "+status_key
	print(complete_status_key)
	'''with-statement guarantees the log file is closed even if the write fails'''
	with open(status_logger_name+'.txt', 'a') as status_log:
		status_log.write(complete_status_key+"\n")

def status_logger_creator(abstracts_log_name):
	'''Standalone status_logger and session_folder filename generator, for anyone using the
	Bias components as standalone functions.
	Returns (status_logger_name, session_folder_name).'''
	session_folder_name = abstracts_log_name.split('/')[-1]
	os.makedirs(session_folder_name)
	status_logger_name = session_folder_name+"/"+"Status_Logger"
	return status_logger_name, session_folder_name

def pre_processing(keywords):
	'''Pre-processing for a run of the pipeline. Builds:
	1. The session LOG folder (named after the date, time and keywords).
	2. The Abstracts LOG name and the Status Logger name used by every stage.
	keywords: either a space-separated string or a list of keyword strings.
	Returns (abstracts_log_name, status_logger_name).'''
	if type(keywords) == str:
		'''If the user uses the function independently of argument_parser(), convert the
		keyword string to a list of words'''
		keywords = argument_formatter(keywords)
	'''Declaring the time and date variables here: year, month, day, hours, minute & seconds'''
	run_start_year = str(datetime.now().date().year)
	run_start_month = str(datetime.now().date().month)
	run_start_day = str(datetime.now().date().day)
	run_start_date = str(datetime.now().date())
	run_start_hour = str(datetime.now().time().hour)
	run_start_minute = str(datetime.now().time().minute)
	run_start_second = str(datetime.now().time().second)
	'''Keywords are joined with underscores and written into the LOG folder name'''
	folder_attachement = "_".join(keywords)
	'''Declaring the LOG folder and the abstract & status_logger file names'''
	logs_folder_name = "LOGS"+"/"+"LOG"+"_"+run_start_date+'_'+run_start_hour+'_'+run_start_minute+"_"+folder_attachement
	abstracts_log_name = logs_folder_name+"/"+'Abstract_Database'+'_'+run_start_date+'_'+run_start_hour+'_'+run_start_minute
	status_logger_name = logs_folder_name+"/"+'Status_Logger'+'_'+run_start_date+'_'+run_start_hour+'_'+run_start_minute
	'''If the folder does not exist, create it in the LOG directory'''
	if not os.path.exists(logs_folder_name):
		os.makedirs(logs_folder_name)
	'''Creating the status_log and writing the session date & time'''
	with open(status_logger_name+'.txt', 'a') as status_log:
		status_log.write("Session:"+" "+run_start_day+"/"+run_start_month+"/"+run_start_year+"\n")
		status_log.write("Time:"+" "+run_start_hour+":"+run_start_minute+":"+run_start_second+"\n")
	logs_folder_name_status_key = "Built LOG folder for session"
	status_logger(status_logger_name, logs_folder_name_status_key)
	return abstracts_log_name, status_logger_name

def keyword_url_generator(keywords_to_search):
	'''Generates the Springer search URLs and the "+"-joined query string for the scraper.
	keywords_to_search: either a space-separated string or a list of keyword strings.
	Returns (start_url, abstract_url, query_string).'''
	if type(keywords_to_search) == str:
		'''If the user uses the function independently of argument_parser(), convert the
		keyword string to a list of words'''
		keywords = argument_formatter(keywords_to_search)
	else:
		'''Bug fix: previously a list argument left "keywords" unbound, raising a
		NameError on the next line'''
		keywords = keywords_to_search
	query_string = "+".join(keywords)
	start_url = "https://link.springer.com/search/page/"
	abstract_url = 'https://link.springer.com'
	'''We take the keywords here and generate the URLs here'''
	return start_url, abstract_url, query_string

def abstract_id_log_name_generator(abstracts_log_name):
	'''Derives the abstract_id_log_name from the abstracts_log_name by inserting "_ID" after
	the first "Abstract" and appending a trailing underscore.'''
	return abstracts_log_name.split('Abstract')[0] + 'Abstract_ID' + abstracts_log_name.split('Abstract')[1]+'_'

def argument_formatter(argument_string):
	'''Splits a space-separated keyword string into a list; shared across the stack.'''
	return argument_string.split()

def arguments_parser():
	'''Reads the CLI arguments for a Springer scraping run:
	a) --keywords: the term(s) searched for on Springer.
	b) --trends: the term(s) whose research trend will be generated.
	c) --paper: "Yes" if the PDFs have to be downloaded as well (currently unused).
	Returns (keywords, trends), both as lists of lowercase-insensitive/lowercased words.'''
	parser = argparse.ArgumentParser()
	parser.add_argument("--keywords", help="Keyword to search on Springer", default="Tiger")
	parser.add_argument("--trends", help="Keywords to generate the trends histogram for", default="Conservation")
	parser.add_argument("--paper", help="If papers have to be downloaded as well", default="No")
	arguments = parser.parse_args()
	'''The keyword string is split and then passed to the scraper functions'''
	keywords = argument_formatter(arguments.keywords)
	'''The entire list of the abstract words will be lowered, hence the trends term has to be
	lowered to obtain a match with those terms'''
	trends = argument_formatter(arguments.trends.lower())
	return keywords, trends

def end_process(status_logger_name):
	'''Self-explanatory, this function declares successful completion of the code.'''
	end_process_status_key = "Process has successfully ended"
	status_logger(status_logger_name, end_process_status_key)
/pyResearchInsights/example.py
'''Hello! We have decided to modularize the entire code, and run it off of one common script. In the future, the Analyzer.py and the Visualizer.py scripts will be called here as well. Check out the build-log.md for a detailed changes implemented. Check out the README.md for more details about the project. Sarthak J. Shetty 12/09/2018''' '''Imports scraper_main() from Scraper.py''' from pyResearchInsights.Scraper import scraper_main '''Importing the analyzer code here as well''' from pyResearchInsights.Analyzer import analyzer_main '''Importing the Cleaner functions here that removes special characters from the corpus''' from pyResearchInsights.Cleaner import cleaner_main '''Importing the visualizer and gensim code here''' from pyResearchInsights.NLP_Engine import nlp_engine_main '''Imports some of the functions required by different scripts here.''' from pyResearchInsights.common_functions import pre_processing '''Declaring tarballer here from system_functions() to tarball the LOG directory, & rm_original_folder to delete the directory and save space.''' from pyResearchInsights.system_functions import tarballer, rm_original_folder keywords_to_search = "Western Ghats Conservation" '''Calling the pre_processing functions here so that abstracts_log_name and status_logger_name is available across the code.''' abstracts_log_name, status_logger_name = pre_processing(keywords_to_search) '''Runs the scraper here to scrape the details from the scientific repository''' scraper_main(keywords_to_search, abstracts_log_name, status_logger_name) '''Cleaning the corpus here before any of the other modules use it for analysis''' cleaner_main(abstracts_log_name, status_logger_name) '''Calling the Analyzer Function here''' analyzer_main(abstracts_log_name, status_logger_name) '''Calling the visualizer code below this portion''' nlp_engine_main(abstracts_log_name, status_logger_name)
/pyResearchInsights/system_functions.py
'''Hello! This portion of the code that acts as the processing code corroborating with the main scripts [re: Scraper, Analyzer+NLP_Engine, Visualizer] - Sarthak J. Shetty 06/02/2019 This script has been renamed as the system_functions.py to carry out OS level interactions, such as: 1. tarballing the LOGS generated to reduce space. 2. Deleting the LOGs once the tarball has been created. 3. (Eventually) enable shell script to send the tarballed file over mail to the user. 4. (Eventually) enable shell script to upload the LOGS generated to GitHub. - Sarthak J. Shetty 15/04/2019''' '''Importing OS to call the tar function to generate the .tar file.''' import os '''From common_functions.py calling the status_logger() function to LOG the tarballing process and others as they are added here.''' from pyResearchInsights.common_functions import status_logger def rm_original_folder(logs_folder_name, status_logger_name): '''This function deletes the logs folder generated once the .tar.gz file has been created.''' rm_original_folder_start_status_key = "Deleting files belonging to:"+" "+logs_folder_name status_logger(status_logger_name, rm_original_folder_start_status_key) command_to_rm_function = "rm -r"+" "+logs_folder_name os.system(command_to_rm_function) def tarballer(logs_folder_name, status_logger_name): '''This function prepares the tar ball of the LOG file.''' tarballer_start_status_key = "Tarballing"+" "+logs_folder_name+" "+"into"+" "+logs_folder_name+".tar.gz" status_logger(status_logger_name, tarballer_start_status_key) command_to_tar_function = "tar czf"+" "+logs_folder_name+".tar.gz"+" "+logs_folder_name os.system(command_to_tar_function) tarballer_start_end_key = "Tarballed"+" "+logs_folder_name+" "+"into"+" "+logs_folder_name+".tar.gz" status_logger(status_logger_name, tarballer_start_end_key)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
lbarchive/b.py
refs/heads/master
{"/bpy/services/base.py": ["/bpy/handlers/__init__.py"], "/bpy/services/wordpress.py": ["/bpy/handlers/__init__.py", "/bpy/services/base.py"], "/tests/test_bpy_handlers_mkd.py": ["/bpy/handlers/mkd.py"], "/b.py": ["/bpy/services/__init__.py"], "/tests/test_bpy_handlers_text.py": ["/bpy/handlers/text.py"], "/tests/test_bpy_handlers_rst.py": ["/bpy/handlers/rst.py"], "/tests/test_bpy_handlers_base.py": ["/bpy/handlers/base.py"], "/bpy/services/blogger.py": ["/bpy/services/base.py"]}
└── ├── b.py ├── bpy │ ├── handlers │ │ ├── __init__.py │ │ ├── asciidoc.py │ │ ├── base.py │ │ ├── html.py │ │ ├── mkd.py │ │ ├── rst.py │ │ └── text.py │ └── services │ ├── __init__.py │ ├── base.py │ ├── blogger.py │ └── wordpress.py └── tests ├── test_bpy_handlers_base.py ├── test_bpy_handlers_mkd.py ├── test_bpy_handlers_rst.py ├── test_bpy_handlers_text.py └── test_setup.py
/b.py
#!/usr/bin/env python # Copyright (C) 2013-2016 by Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ ============ b.py command ============ Commands ======== ============= ======================= command supported services ============= ======================= ``blogs`` ``b`` ``post`` ``b``, ``wp`` ``generate`` ``base``, ``b``, ``wp`` ``checklink`` ``base``, ``b``, ``wp`` ``search`` ``b`` ============= ======================= Descriptions: ``blogs`` list blogs. This can be used for blog IDs lookup. ``post`` post or update a blog post. ``generate`` generate HTML file at ``<TEMP>/draft.html``, where ``<TEMP>`` is the system's temporary directory. The generation can output a preview html at ``<TEMP>/preview.html`` if there is ``tmpl.html``. It will replace ``%%Title%%`` with post title and ``%%Content%%`` with generated HTML. ``checklink`` check links in generated HTML using lnkckr_. ``search`` search blog .. 
_lnkckr: https://pypi.python.org/pypi/lnkckr """ from __future__ import print_function import argparse as ap import codecs import imp import logging import os import sys import traceback from bpy.handlers import handlers from bpy.services import find_service, services __program__ = 'b.py' __description__ = 'Post to Blogger or WordPress in markup language seamlessly' __copyright__ = 'Copyright 2013-2016, Yu Jie Lin' __license__ = 'MIT License' __version__ = '0.11.0' __website__ = 'http://bitbucket.org/livibetter/b.py' __author__ = 'Yu-Jie Lin' __author_email__ = 'livibetter@gmail.com' # b.py stuff ############ # filename of local configuration without '.py' suffix. BRC = 'brc' def parse_args(): p = ap.ArgumentParser() p.add_argument('--version', action='version', version='%(prog)s ' + __version__) p.add_argument('-d', '--debug', action='store_true', help='turn on debugging messages') p.add_argument('-s', '--service', default='base', help='what service to use. (Default: %(default)s)') sp = p.add_subparsers(help='commands') pblogs = sp.add_parser('blogs', help='list blogs') pblogs.set_defaults(subparser=pblogs, command='blogs') psearch = sp.add_parser('search', help='search for posts') psearch.add_argument('-b', '--blog', help='Blog ID') psearch.add_argument('q', nargs='+', help='query text') psearch.set_defaults(subparser=psearch, command='search') pgen = sp.add_parser('generate', help='generate html') pgen.add_argument('filename') pgen.set_defaults(subparser=pgen, command='generate') pchk = sp.add_parser('checklink', help='check links in chkerateed html') pchk.add_argument('filename') pchk.set_defaults(subparser=pchk, command='checklink') ppost = sp.add_parser('post', help='post or update a blog post') ppost.add_argument('filename') ppost.set_defaults(subparser=ppost, command='post') args = p.parse_args() return args def load_config(): rc = None try: search_path = [os.getcwd()] _mod_data = imp.find_module(BRC, search_path) print('Loading local configuration...') 
try: rc = imp.load_module(BRC, *_mod_data) finally: if _mod_data[0]: _mod_data[0].close() except ImportError: pass except Exception: traceback.print_exc() print('Error in %s, aborted.' % _mod_data[1]) sys.exit(1) return rc def main(): args = parse_args() logging.basicConfig( format=( '%(asctime)s ' '%(levelname).4s ' '%(module)5.5s:%(funcName)-10.10s:%(lineno)04d ' '%(message)s' ), datefmt='%H:%M:%S', ) if args.debug: logging.getLogger().setLevel(logging.DEBUG) encoding = sys.stdout.encoding if not encoding.startswith('UTF'): msg = ( 'standard output encoding is %s, ' 'try to set with UTF-8 if there is output issues.' ) logging.warning(msg % encoding) if sys.version_info.major == 2: sys.stdout = codecs.getwriter(encoding)(sys.stdout, 'replace') sys.stderr = codecs.getwriter(encoding)(sys.stderr, 'replace') elif sys.version_info.major == 3: sys.stdout = codecs.getwriter(encoding)(sys.stdout.buffer, 'replace') sys.stderr = codecs.getwriter(encoding)(sys.stderr.buffer, 'replace') rc = load_config() service_options = {'blog': None} if rc: if hasattr(rc, 'handlers'): for name, handler in rc.handlers.items(): if name in handlers: handlers[name].update(handler) else: handlers[name] = handler.copy() if hasattr(rc, 'services'): for name, service in rc.services.items(): if name in services: services[name].update(service) else: services[name] = service.copy() if hasattr(rc, 'service'): args.service = rc.service if hasattr(rc, 'service_options'): service_options.update(rc.service_options) if hasattr(args, 'blog') and args.blog is not None: service_options['blog'] = args.blog filename = args.filename if hasattr(args, 'filename') else None service = find_service(args.service, service_options, filename) if args.command == 'blogs': service.list_blogs() elif args.command == 'search': service.search(' '.join(args.q)) elif args.command == 'generate': service.generate() elif args.command == 'checklink': service.checklink() elif args.command == 'post': service.post() if __name__ == 
'__main__': main()
/bpy/handlers/__init__.py
# Copyright (C) 2013 by Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
Registry of markup handlers and the lookup used to pick one for a file.

Handler IDs and the file extensions they match:

==================== ========================================================
ID                   extensions
==================== ========================================================
``AsciiDoc``         ``.asciidoc``
``HTML``             ``.html``, ``.htm``, ``.raw``
``Markdown``         ``.md``, ``.mkd``, ``.mkdn``, ``.mkdown``, ``.markdown``
``reStructuredText`` ``.rst``
``Text``             ``.txt``, ``.text``
==================== ========================================================

Options
=======

General options (see :class:`bpy.handlers.base.BaseHandler`) are set per
handler under an ``'options'`` key:

* ``id_affix``: affix added to HTML element IDs to avoid conflicts across
  posts. ``None`` disables it, a non-empty string is used as-is, and an empty
  string requests an auto-generated affix. Currently honored by
  :mod:`bpy.handlers.rst`.
* ``markup_prefix`` / ``markup_suffix``: strings prepended/appended to the
  markup before rendering, useful for shared headers, footers, or directives
  such as ``.. sectnum::``.
* ``smartypants``: post-process the rendered HTML with the smartypants_
  library.
* ``embed_images``: replace local ``img src`` references (not ``http``,
  ``https``, or ``data`` schemes) with Base64 data URIs; missing files are
  reported and left untouched. Supported by every handler except
  :mod:`bpy.handlers.text`.

.. _smartypants: https://pypi.python.org/pypi/smartypants

Custom handlers
===============

A custom handler is a module exposing a ``Handler`` class derived from
:class:`bpy.handlers.base.BaseHandler`, registered from ``brc.py``:

.. code:: python

  import re

  handlers = {
    'SampleHandler': {
      'match': re.compile(r'.*\\.ext$'),
      'module': 'sample_handler',
    },
  }
"""

import os
import re
import sys
import traceback

handlers = {
  'AsciiDoc': {
    'match': re.compile(r'.*\.asciidoc$'),
    'module': 'bpy.handlers.asciidoc',
  },
  'HTML': {
    'match': re.compile(r'.*\.(html?|raw)$'),
    'module': 'bpy.handlers.html',
  },
  'Markdown': {
    'match': re.compile(r'.*\.(markdown|md(own)?|mkdn?)$'),
    'module': 'bpy.handlers.mkd',
  },
  'reStructuredText': {
    'match': re.compile(r'.*\.rst$'),
    'module': 'bpy.handlers.rst',
  },
  'Text': {
    'match': re.compile(r'.*\.te?xt$'),
    'module': 'bpy.handlers.text',
  },
}


def find_handler(filename):
  """Return a Handler instance for *filename*.

  Returns None when no registered pattern matches the filename, or when no
  matching handler module can be imported."""
  # Custom handler modules declared in brc.py live in the working directory,
  # so make it importable for the duration of the lookup.
  sys.path.insert(0, os.getcwd())
  loaded = None
  matched_spec = None
  for handler_name, spec in handlers.items():
    if not spec['match'].match(filename):
      continue
    matched_spec = spec
    try:
      loaded = __import__(spec['module'], fromlist=['Handler'])
      break
    except Exception:
      # A broken handler is reported but does not stop the search; another
      # registered handler may still match this filename.
      print('Cannot load module %s of handler %s' % (spec['module'],
                                                     handler_name))
      traceback.print_exc()
  sys.path.pop(0)
  if loaded:
    return loaded.Handler(filename, matched_spec.get('options', {}))
  return None
/bpy/handlers/asciidoc.py
#!/usr/bin/env python
# Copyright (C) 2013, 2014 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
AsciiDoc markup handler.

You can specify embed_images_, for example:

.. code:: python

  handlers = {
    'AsciiDoc': {
      'options': {
        'embed_images': True,
      },
    },
  }
"""

from __future__ import print_function, unicode_literals

# NOTE(review): Python-2-only module; this handler predates a Python 3 port.
import StringIO

from bpy.api.asciidocapi import AsciiDocAPI
from bpy.handlers import base


class Handler(base.BaseHandler):
  """Handler for AsciiDoc markup language

  Headers are written as ``//`` AsciiDoc comment lines:

  >>> handler = Handler(None)
  >>> print(handler.generate_header({'title': 'foobar'}))
  // !b
  // title: foobar
  <BLANKLINE>
  """
  PREFIX_HEAD = '// '
  PREFIX_END = ''
  HEADER_FMT = '// %s: %s'

  def _generate(self, markup=None):
    """Generate HTML from AsciiDoc

    markup: AsciiDoc source text; defaults to this handler's own markup
    (with any configured prefix/suffix applied by the base class).
    Returns the rendered HTML body fragment.

    >>> handler = Handler(None)
    >>> print(handler._generate('a *b*'))
    <p>a <strong>b</strong></p>
    >>> print(handler._generate('a\\nb'))
    <p>a b</p>
    >>> print(handler._generate('a\\nb\\n\\nc'))
    <p>a b</p>
    <p>c</p>
    """
    if markup is None:
      markup = self.markup

    # AsciiDocAPI works on byte streams, so encode the unicode markup to
    # UTF-8 before rendering and decode the output afterwards.
    markup = markup.encode('utf8')
    asciidoc = AsciiDocAPI()
    infile = StringIO.StringIO(markup)
    outfile = StringIO.StringIO()
    # --no-header-footer: emit only the body fragment, not a full document.
    asciidoc.options('--no-header-footer')
    asciidoc.execute(infile, outfile, backend='html4')
    html = outfile.getvalue().decode('utf8')
    # Normalize Windows line endings and drop trailing whitespace.
    html = html.replace('\r\n', '\n').rstrip()
    return html
/bpy/handlers/base.py
# Copyright (C) 2013-2015 Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import print_function, unicode_literals import codecs import logging import re import warnings from abc import ABCMeta, abstractmethod from base64 import b64encode from hashlib import md5 from os.path import basename, exists, splitext HAS_SMARTYPANTS = False try: import smartypants HAS_SMARTYPANTS = True except ImportError: pass class BaseHandler(): """The base clase of markup handler""" __metaclass__ = ABCMeta # default handler options OPTIONS = { 'markup_prefix': '', 'markup_suffix': '', 'smartypants': False, 'id_affix': None, } MERGE_HEADERS = ('service', 'kind', 'blog', 'id', 'url', 'draft') HEADER_FMT = '%s: %s' PREFIX_HEAD = '' PREFIX_END = '' RE_SPLIT = re.compile(r'^(?:([^\n]*?!b.*?)\n\n)?(.*)', re.DOTALL | re.MULTILINE) RE_HEADER = re.compile(r'.*?([a-zA-Z0-9_-]+)\s*[=:]\s*(.*)\s*') SUPPORT_EMBED_IMAGES = True RE_IMG = re.compile( r''' (?P<prefix><img.*?) 
src="(?!data:image/|https?://)(?P<src>[^"]*)" (?P<suffix>.*?>) ''', re.VERBOSE ) def __init__(self, filename, options=None): self.filename = filename self.title = '' self.options = self.OPTIONS.copy() self.options.update(options or {}) if filename: with codecs.open(filename, 'r', 'utf8') as f: self.source = f.read() header, markup = self.split_header_markup() self.title = splitext(basename(filename))[0] else: header = {} markup = '' self.header = header self.markup = markup self.modified = False def set_header(self, k, v): """Set header >>> class Handler(BaseHandler): ... def _generate(self, source=None): return source >>> handler = Handler(None) >>> print(handler.header) {} >>> handler.modified False >>> handler.set_header('foo', 'bar') >>> print(handler.header['foo']) bar >>> handler.modified True """ if k in self.header and self.header[k] == v: return self.header[k] = v self.modified = True def merge_header(self, header): """Merge header >>> class Handler(BaseHandler): ... def _generate(self, source=None): return source >>> handler = Handler(None) >>> handler.merge_header({'id': 12345, 'bogus': 'blah'}) >>> print(handler.header['id']) 12345 >>> handler.modified True """ for k, v in header.items(): if k not in self.MERGE_HEADERS: continue if k == 'blog': v = v['id'] elif k == 'kind': v = v.replace('blogger#', '') self.set_header(k, v) @property def markup(self): """Return markup with markup_prefix and markup_suffix >>> class Handler(BaseHandler): ... def _generate(self, source=None): return source >>> options = { ... 'markup_prefix': 'the prefix\\n', ... 'markup_suffix': '\\nthe suffix', ... 
} >>> handler = Handler(None, options) >>> handler.markup = 'content' >>> print(handler.markup) the prefix content the suffix """ return '%s%s%s' % ( self.options['markup_prefix'], self._markup, self.options['markup_suffix'], ) @markup.setter def markup(self, markup): """Set the markup""" self._markup = markup @property def id_affix(self): """Return id_affix The initial value is from self.options, and can be overriden by self.header. Returns * None if it's None. * value if value is not '' * first 4 digits of md5 of value if value is '', and assign back to self.options. _generate method of Handler should write back to self.header. >>> class Handler(BaseHandler): ... def _generate(self, source=None): return source >>> options = { ... 'id_affix': None, ... } >>> handler = Handler(None, options) >>> print(repr(handler.id_affix)) None >>> handler.options['id_affix'] = 'foobar' >>> print(handler.id_affix) foobar >>> # auto generate an id affix from title >>> handler.options['id_affix'] = '' >>> handler.title = 'abc' >>> print(handler.id_affix) 9001 >>> handler.header['id_affix'] = 'override-affix' >>> print(handler.id_affix) override-affix """ id_affix = self.options['id_affix'] # override? if 'id_affix' in self.header: id_affix = self.header['id_affix'] if self.header['id_affix'] and id_affix != 'None': return self.header['id_affix'] # second case is from header of post, has to use string 'None' if id_affix is None or id_affix == 'None': return None if id_affix: return id_affix m = md5() # if self.title is Unicode-type string, then encode it, # otherwise it's byte-type, then just update with it. # The __future__.unicode_literals ensures '' is unicode-type. 
if isinstance(self.title, type('')): m.update(self.title.encode('utf8')) else: m.update(self.title) return m.hexdigest()[:4] @abstractmethod def _generate(self, markup=None): """Generate HTML of markup source""" raise NotImplementedError def generate(self, markup=None): """Generate HTML >>> class Handler(BaseHandler): ... def _generate(self, markup=None): return markup >>> handler = Handler(None) >>> print(handler.generate('foo "bar"')) foo "bar" >>> handler.options['smartypants'] = True >>> print(handler.generate('foo "bar"')) foo &#8220;bar&#8221; """ if markup is None: markup = self.markup html = self._generate(markup) if self.options.get('smartypants', False): if not HAS_SMARTYPANTS: warnings.warn("smartypants option is set, " "but the library isn't installed.", RuntimeWarning) return html Attr = smartypants.Attr html = smartypants.smartypants(html, Attr.set1 | Attr.w) if self.SUPPORT_EMBED_IMAGES and self.options.get('embed_images', False): html = self.embed_images(html) return html def generate_header(self, header=None): """Generate header in text for writing back to the file >>> class Handler(BaseHandler): ... PREFIX_HEAD = 'foo ' ... PREFIX_END = 'bar' ... HEADER_FMT = '--- %s: %s' ... def _generate(self, source=None): pass >>> handler = Handler(None) >>> print(handler.generate_header({'title': 'foobar'})) foo !b --- title: foobar bar <BLANKLINE> >>> print(handler.generate_header({'labels': ['foo', 'bar']})) foo !b --- labels: foo, bar bar <BLANKLINE> """ if header is None: header = self.header lines = [self.PREFIX_HEAD + '!b'] for k, v in header.items(): if k in ('labels', 'categories'): v = ', '.join(v) elif k == 'draft': v = repr(v) lines.append(self.HEADER_FMT % (k, v)) lines.append(self.PREFIX_END) return '\n'.join([_f for _f in lines if _f]) + '\n' def generate_title(self, title=None): """Generate title for posting >>> class Handler(BaseHandler): ... 
def _generate(self, source=None): return source >>> handler = Handler(None) >>> print(handler.generate_title('foo "bar"')) foo "bar" >>> print(handler.generate_title('foo\\nbar\\n\\n')) foo bar >>> handler.options['smartypants'] = True >>> print(handler.generate_title('foo "bar"')) foo &#8220;bar&#8221; """ if title is None: title = self.header.get('title', self.title) title = self.generate(title) title = title.replace('<p>', '').replace('</p>', '') # no trailing newlines title = re.sub(r'\n+', ' ', title).rstrip() return title def generate_post(self): """Generate dict for merging to post object of API""" post = {'title': self.generate_title(), 'draft': False} for k in ('blog', 'id', 'labels', 'categories', 'draft'): if k not in self.header: continue if k == 'blog': post[k] = {'id': self.header[k]} else: post[k] = self.header[k] return post def split_header_markup(self, source=None): """Split source into header and markup parts It also parses header into a dict.""" if source is None: source = self.source header, markup = self.RE_SPLIT.match(source).groups() if not header: logging.warning('found no header') if not markup: logging.warning('markup is empty') logging.debug('markup length = %d' % len(markup)) _header = {} if header: for item in header.split('\n'): m = self.RE_HEADER.match(item) if not m: continue k, v = list(map(type('').strip, m.groups())) if k in ('labels', 'categories'): v = [_f for _f in [label.strip() for label in v.split(',')] if _f] elif k == 'draft': v = v.lower() in ('true', 'yes', '1') _header[k] = v header = _header logging.debug('header = %r' % header) return header, markup def update_source(self, header=None, markup=None, only_returned=False): if header is None: header = self.header if markup is None: markup = self._markup source = self.generate_header(header) + '\n' + markup if not only_returned: self.source = source return source def write(self, forced=False): """Write source back to file""" if not self.modified: if not forced: return 
else: self.update_source() with codecs.open(self.filename, 'w', 'utf8') as f: f.write(self.source) self.modified = False def embed_images(self, html): """Embed images on local filesystem as data URI >>> class Handler(BaseHandler): ... def _generate(self, source=None): return source >>> handler = Handler(None) >>> html = '<img src="http://example.com/example.png"/>' >>> print(handler.embed_images(html)) <img src="http://example.com/example.png"/> >>> html = '<img src="tests/test.png"/>' >>> print(handler.embed_images(html)) #doctest: +ELLIPSIS <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...QmCC"/> """ if not self.SUPPORT_EMBED_IMAGES: raise RuntimeError('%r does not support embed_images' % type(self)) return self.RE_IMG.sub(self._embed_image, html) @staticmethod def _embed_image(match): src = match.group('src') if not exists(src): print('%s is not found.' % src) return match.group(0) with open(src, 'rb') as f: data = b64encode(f.read()).decode('ascii') return '%ssrc="%s"%s' % ( match.group('prefix'), 'data:image/%s;base64,%s' % (splitext(src)[1].lstrip('.'), data), match.group('suffix'), )
/bpy/handlers/html.py
# Copyright (C) 2013, 2014 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
HTML handler simply takes the file content as its output, and assume it's
valid HTML, therefore the handler doesn't edit or validate the content.

You can specify embed_images_, for example:

.. code:: python

  handlers = {
    'HTML': {
      'options': {
        'embed_images': True,
      },
    },
  }
"""

from __future__ import print_function, unicode_literals

from bpy.handlers import base


class Handler(base.BaseHandler):
  """Handler for HTML

  >>> handler = Handler(None)
  >>> print(handler.generate_header({'title': 'foobar'}))
  <!-- !b
  title: foobar
  -->
  <BLANKLINE>
  """

  # the header is wrapped in an HTML comment
  PREFIX_HEAD = '<!-- '
  PREFIX_END = '-->'
  HEADER_FMT = '%s: %s'

  def _generate(self, markup=None):
    """Return markup untouched

    This handler doesn't do anything to the markup.

    >>> handler = Handler(None)
    >>> print(handler._generate('<br/>'))
    <br/>
    """
    # the content is assumed to already be valid HTML
    return self.markup if markup is None else markup
/bpy/handlers/mkd.py
# Copyright (C) 2013, 2014 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
You can specify `configuration`__ for Python Markdown in :ref:`brc.py` or
embed_images_, for example:

__ http://packages.python.org/Markdown/reference.html#markdown

.. code:: python

  handlers = {
    'Markdown': {
      'options': {
        'config': {
          'extensions': ['extension1', 'extension2'],
          'tab_length': 8,
        },
        'embed_images': True,
      },
    },
  }
"""

from __future__ import print_function, unicode_literals

import markdown

from bpy.handlers import base


class Handler(base.BaseHandler):
  """Handler for Markdown markup language

  >>> handler = Handler(None)
  >>> print(handler.generate_header({'title': 'foobar'}))
  <!-- !b
  title: foobar
  -->
  <BLANKLINE>
  """

  # the header is wrapped in an HTML comment
  PREFIX_HEAD = '<!-- '
  PREFIX_END = '-->'
  HEADER_FMT = '%s: %s'

  def _generate(self, markup=None):
    """Generate HTML from Markdown

    >>> handler = Handler(None)
    >>> print(handler._generate('a *b*'))
    <p>a <em>b</em></p>
    """
    source = self.markup if markup is None else markup
    # markdown library only accepts unicode, utf8 encoded str results in
    # error.
    config = self.options.get('config', {})
    return markdown.markdown(source, **config)
/bpy/handlers/rst.py
# Copyright (C) 2011-2014 Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
You can specify settings-overrides_ for reStructuredText in :ref:`brc.py` or
the embed_images_, for example:

.. code:: python

  handlers = {
    'reStructuredText': {
      'options': {
        'embed_images': True,
        'register_directives': {
          'dir_name': MyDir,
        },
        'register_roles': {
          'role_name': MyRole,
        },
        'settings_overrides': {
          'footnote_references': 'brackets',
        },
      },
    },
  }

.. _settings-overrides:
   http://docutils.sourceforge.net/docs/user/config.html#html4css1-writer


Custom Directives and Roles
===========================

For adding your own custom reStructuredText directives or roles, you can do it
in :ref:`brc.py` with one of the following method:

* by calling register functions of docutils directly,
* by adding in b.py's option as shown above, or
* by using decorator of b.py, for example:

.. code:: python

  from docutils.parsers.rst import Directive

  from bpy.handlers.rst import register_directive, register_role

  @register_directive('mydir')
  class MyDir(Directive):
    pass

  @register_role('myrole')
  def myrole(name, rawtext, text, lineno, inliner, options=None, content=None):
    pass
"""

from __future__ import print_function, unicode_literals

from docutils.core import publish_parts
from docutils.parsers.rst import directives, roles

from bpy.handlers import base


def register_directive(dir_name):
  """For lazy guys

  .. code:: python

    @register_directive(name)
    class MyDirective(Directive):
      pass
  """
  def _register_directive(directive):
    directives.register_directive(dir_name, directive)
    return directive
  return _register_directive


def register_role(role_name):
  """Decorator to register a function as a canonical docutils role"""
  def _register_role(role):
    roles.register_canonical_role(role_name, role)
    return role
  return _register_role


class Handler(base.BaseHandler):
  """Handler for reStructuredText markup language

  >>> handler = Handler(None)
  >>> print(handler.generate_header({'title': 'foobar'}))
  .. !b
   title: foobar
  <BLANKLINE>
  """

  # the header lives in a reST comment block
  PREFIX_HEAD = '.. '
  PREFIX_END = ''
  HEADER_FMT = ' %s: %s'

  def __init__(self, filename, options=None):
    super(Handler, self).__init__(filename, options)

    if not options:
      return
    # register user-supplied directives and roles with docutils
    for dir_name, directive in options.get('register_directives',
                                           {}).items():
      directives.register_directive(dir_name, directive)
    for role_name, role in options.get('register_roles', {}).items():
      roles.register_canonical_role(role_name, role)

  def _generate(self, markup=None):
    """Generate HTML from reStructuredText

    (The docstring previously said "from Markdown" — copy-paste from the
    Markdown handler; this handler renders reStructuredText.)

    >>> handler = Handler(None)
    >>> print(handler._generate('a *b*'))
    <p>a <em>b</em></p>
    """
    if markup is None:
      markup = self.markup

    settings_overrides = {
        'output_encoding': 'utf8',
        'initial_header_level': 2,
        'doctitle_xform': 0,
        'footnote_references': 'superscript',
    }
    settings_overrides.update(self.options.get('settings_overrides', {}))

    # prefix generated element IDs so posts don't collide on one page
    id_affix = self.id_affix
    if id_affix:
      settings_overrides['id_prefix'] = id_affix + '-'
      self.set_header('id_affix', id_affix)

    doc_parts = publish_parts(markup,
                              settings_overrides=settings_overrides,
                              writer_name="html")
    html = doc_parts['body_pre_docinfo'] + doc_parts['body'].rstrip()
    return html
/bpy/handlers/text.py
# Copyright (C) 2013, 2014 Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ The Text handler for plain text always escape HTML, and add ``<br/>`` if not ``pre_wrap``. You can specify the following options for plain text in :ref:`brc.py`, for example: .. code:: python handlers = { 'Text': { 'options': { 'pre_wrap': False }, }, } ``pre_wrap`` will wrap output in ``<pre/>`` tag. 
""" from __future__ import print_function, unicode_literals import cgi import re from bpy.handlers import base class Handler(base.BaseHandler): """Handler for plain text >>> handler = Handler(None) >>> handler.markup = 'post <content>\\n & something' >>> print(handler.generate()) post &lt;content&gt;<br/> &amp; something >>> handler.options['pre_wrap'] = True >>> print(handler.generate()) <pre>post &lt;content&gt; &amp; something</pre> >>> handler = Handler(None) >>> print(handler.generate_header({'title': 'foobar'})) !b title: foobar <BLANKLINE> """ PREFIX_HEAD = '' PREFIX_END = '' HEADER_FMT = '%s: %s' SUPPORT_EMBED_IMAGES = False def generate_title(self, markup=None): """Generate HTML from plain text >>> handler = Handler(None) >>> print(handler.generate_title('a < b\\nc & d\\n\\nfoo')) a &lt; b c &amp; d foo """ html = super(Handler, self).generate_title(markup) html = html.replace('<pre>', '').replace('</pre>', '') return re.sub('(<br/> )+', ' ', html) def _generate(self, markup=None): """Generate HTML from plain text >>> handler = Handler(None) >>> print(handler._generate('a < b\\nc & d\\n\\xc3\\xa1')) a &lt; b<br/> c &amp; d<br/> \xc3\xa1 >>> handler.options['pre_wrap'] = True >>> print(handler._generate('abc\\ndef')) <pre>abc def</pre> """ if markup is None: markup = self.markup html = cgi.escape(markup) if self.options.get('pre_wrap', False): return '<pre>%s</pre>' % html else: return html.replace('\n', '<br/>\n')
/bpy/services/__init__.py
# Copyright (C) 2013 by Yu-Jie Lin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
Services' IDs:

========= =====================
service   IDs
========= =====================
Base      ``base``
Blogger   ``b``, ``blogger``
WordPress ``wp``, ``wordpress``
========= =====================

.. _service-options:

Options
=======

To assign options to chosen service, add ``service_options`` in
:ref:`brc.py`, for example:

.. code:: python

  service = "<service id>"
  service_options = {
    'option1': 'value1',
    'option2': 2,
  }

.. _custom-service:

Writing a custom service
========================

A sample handler ``sample_service.py``:

.. code:: python

  from bpy.service import base

  class Service(base.Service):

    # see bpy/services for examples
    pass

And corresponding setting in :ref:`brc.py`:

.. code:: python

  import re

  # this matches the re
  service = 'foobar'

  services = {
    'SampleService': {
      'match': re.compile(r'^foobar$'),
      'module': 'sample_service',
    },
  }
"""

import os
import re
import sys
import traceback

# registry of known services: 'match' decides whether a service ID picks
# the entry, 'module' is imported lazily by find_service
services = {
    'Base': {
        'match': re.compile(r'^base$', re.I),
        'module': 'bpy.services.base',
    },
    'Blogger': {
        'match': re.compile(r'^(b|blogger)$', re.I),
        'module': 'bpy.services.blogger',
    },
    'WordPress': {
        'match': re.compile(r'^(wp|wordpress)$', re.I),
        'module': 'bpy.services.wordpress',
    },
}


def find_service(service_name, service_options, *args, **kwargs):
  """Find a service whose ID matches service_name and instantiate it

  service_name is tested against each registered service's 'match'
  regular expression; the first matching module that imports
  successfully wins.  Returns a Service instance, or None when nothing
  matches or the matching module cannot be loaded.
  """
  # allow custom service modules placed in the current working directory
  sys.path.insert(0, os.getcwd())
  module = None
  try:
    for name, hdlr in services.items():
      if hdlr['match'].match(service_name):
        try:
          module = __import__(hdlr['module'], fromlist=['Service'])
          break
        except Exception:
          # report and keep looking; a broken custom service should not
          # abort the search
          print('Cannot load module %s of service %s' % (hdlr['module'],
                                                         name))
          traceback.print_exc()
  finally:
    # always restore sys.path, even if matching or importing raises
    sys.path.pop(0)

  if module:
    return module.Service(service_options, *args, **kwargs)
  return None
/bpy/services/base.py
# Copyright (C) 2013 by Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Base recognizes no options, it's only used for ``generate`` or ``checklink`` commands. 
""" from __future__ import print_function import codecs import os import sys from io import StringIO from os import path from tempfile import gettempdir from bpy.handlers import find_handler HAS_LNKCKR = False try: from lnkckr.checkers.html import Checker HAS_LNKCKR = True except ImportError: pass TEMPLATE_PATH = path.join(os.getcwd(), 'tmpl.html') class Service(object): """The base clase of markup handler""" service_name = 'base' def __init__(self, options, filename=None): self.options = options self.filename = filename def post(self): """Publish the post to the service""" raise NotImplementedError def make_handler_post(self): handler = find_handler(self.filename) if not handler: print('No handler for the file!') sys.exit(1) hdr = handler.header post = { 'service': self.service_name, # default resource kind is blogger#post 'kind': 'blogger#%s' % hdr.get('kind', 'post'), 'content': handler.generate(), } if isinstance(self.options['blog'], int): post['blog'] = {'id': self.options['blog']} post.update(handler.generate_post()) return handler, post def generate(self): handler, post = self.make_handler_post() with codecs.open(path.join(gettempdir(), 'draft.html'), 'w', encoding='utf8') as f: f.write(post['content']) if path.exists(TEMPLATE_PATH): with codecs.open(TEMPLATE_PATH, encoding='utf8') as f: html = f.read() html = html.replace('%%Title%%', post['title']) html = html.replace('%%Content%%', post['content']) with codecs.open(path.join(gettempdir(), 'preview.html'), 'w', encoding='utf8') as f: f.write(html) def checklink(self): if not HAS_LNKCKR: print('You do not have lnkckr library') return handler, post = self.make_handler_post() c = Checker() c.process(StringIO(post['content'])) c.check() print() c.print_all() def search(self, q): """Search posts""" raise NotImplementedError
/bpy/services/blogger.py
# Copyright (C) 2013-2016 by Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ Blogger service recognizes the following options in :ref:`brc.py`: .. _blogger-brc: .. code:: python service = 'blogger' service_options = { client_id: '<your client ID>', client_secret: '<your client secret>', 'blog': <blog id>, } You can use ``blogs`` command to quickly get the blog ID. .. _Authorization: Authorization ============= You need to authorize *b.py* to access your Blogger account with your OAuth `client ID`_. Simply using ``blogs`` command (see *Commands* section) to start the authorization process: .. code:: sh b.py blogs Once you follow the prompted steps, there should be a b.dat_ created under the current working directory, you should keep it safe. .. _Client ID: Client ID ========= You will need to obtain a OAuth Client ID in order to use *b.py*. 1. Go to `Google Developers Console`_. 2. Create a new project. 3. Enable *Blogger API*. 4. 
Create a *OAuth client ID* credential with *Other* application type. 5. Download the credential JSON for *Client Secret*. 6. Add *Client ID* and *Client Secert* to your :ref:`brc.py` as shown here__. .. _Google Developers Console: https://console.developers.google.com/ __ blogger-brc_ .. _b.dat: ``b.dat`` ========= ``b.dat`` is a credential file for Blogger service, it's read by *b.py* from the current directory. To create the file, please follow Authorization_. """ from __future__ import print_function import os import sys import httplib2 from bpy.services.base import Service as BaseService if sys.version_info.major == 2: from apiclient.discovery import build from oauth2client.client import OAuth2WebServerFlow from oauth2client.file import Storage as BaseStorage from oauth2client.tools import run_flow, argparser API_STORAGE = 'b.dat' class Storage(BaseStorage): """Inherit the API Storage to suppress CredentialsFileSymbolicLinkError """ def __init__(self, filename): super(Storage, self).__init__(filename) self._filename_link_warned = False def _validate_file(self): if os.path.islink(self._filename) and not self._filename_link_warned: print('File: %s is a symbolic link.' 
% self._filename) self._filename_link_warned = True class Service(BaseService): service_name = 'blogger' def __init__(self, *args, **kwargs): super(Service, self).__init__(*args, **kwargs) self.http = None self.service = None if 'client_id' not in self.options or 'client_secret' not in self.options: raise RuntimeError( 'You need to supply client ID and secret, see ' 'http://pythonhosted.org/b.py/apidoc/bpy.services.html#client-id' ) self.client_id = self.options['client_id'] self.client_secret = self.options['client_secret'] def auth(self): if sys.version_info.major != 2: msg = ('This command requires google-api-python-client, ' 'which only support Python 2') raise RuntimeError(msg) if self.http and self.service: return FLOW = OAuth2WebServerFlow( self.client_id, self.client_secret, 'https://www.googleapis.com/auth/blogger', auth_uri='https://accounts.google.com/o/oauth2/auth', token_uri='https://accounts.google.com/o/oauth2/token', ) storage = Storage(API_STORAGE) credentials = storage.get() if credentials is None or credentials.invalid: credentials = run_flow(FLOW, storage, argparser.parse_args([])) http = httplib2.Http() self.http = credentials.authorize(http) self.service = build("blogger", "v3", http=self.http) def list_blogs(self): self.auth() blogs = self.service.blogs() req = blogs.listByUser(userId='self') resp = req.execute(http=self.http) print('%-20s: %s' % ('Blog ID', 'Blog name')) for blog in resp['items']: print('%-20s: %s' % (blog['id'], blog['name'])) def post(self): handler, post = self.make_handler_post() if 'blog' not in post: print('You need to specify which blog to post on ' 'in either brc.py or header of %s.' 
% handler.filename) sys.exit(1) self.auth() kind = post['kind'].replace('blogger#', '') title = post['title'] if kind == 'post': posts = self.service.posts() elif kind == 'page': posts = self.service.pages() else: raise ValueError('Unsupported kind: %s' % kind) data = { 'blogId': post['blog']['id'], 'body': post, } if 'id' in post: data['%sId' % kind] = post['id'] action = 'revert' if post['draft'] else 'publish' data[action] = True print('Updating a %s: %s' % (kind, title)) req = posts.update(**data) else: data['isDraft'] = post['draft'] print('Posting a new %s: %s' % (kind, title)) req = posts.insert(**data) resp = req.execute(http=self.http) resp['draft'] = resp['status'] == 'DRAFT' handler.merge_header(resp) handler.write() def search(self, q): if self.options['blog'] is None: raise ValueError('no blog ID to search') self.auth() fields = 'items(labels,published,title,url)' posts = self.service.posts() req = posts.search(blogId=self.options['blog'], q=q, fields=fields) resp = req.execute(http=self.http) items = resp.get('items', []) print('Found %d posts on Blog %s' % (len(items), self.options['blog'])) print() for post in items: print(post['title']) labels = post.get('labels', []) if labels: print('Labels:', ', '.join(labels)) print('Published:', post['published']) print(post['url']) print()
/bpy/services/wordpress.py
# Copyright (C) 2013, 2014, 2016 by Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ WordPress service recognizes the following options in :ref:`brc.py`: .. code:: python service = 'wordpress' service_options = { 'blog': <blog url>, 'username': 'user01', 'password': 'secret', } ``blog`` should be the URL of WordPress blog, for example, ``http://<something>.wordpress.com/`` or ``http://example.com/wordpress/``. Note that the tailing slash must be included. In order to use WordPress XML-RPC API, you must provide ``username`` and ``password``. 
""" from __future__ import print_function import sys from bpy.handlers import find_handler from bpy.services.base import Service as BaseService # isort has different result for Python 2 and 3, so skip them from wordpress_xmlrpc import Client, WordPressPage, WordPressPost # isort:skip from wordpress_xmlrpc.methods import posts # isort:skip class Service(BaseService): service_name = 'wordpress' def __init__(self, *args, **kwargs): super(Service, self).__init__(*args, **kwargs) self.service = None def auth(self): self.service = Client(self.options['blog'] + 'xmlrpc.php', self.options['username'], self.options['password']) def make_handler_post(self): handler = find_handler(self.filename) if not handler: print('No handler for the file!') sys.exit(1) hdr = handler.header post = { 'service': self.service_name, 'kind': hdr.get('kind', 'post'), 'content': handler.generate(), } if isinstance(self.options['blog'], type('')): post['blog'] = {'id': self.options['blog']} post.update(handler.generate_post()) return handler, post def post(self): handler, post = self.make_handler_post() if 'blog' not in post: print('You need to specify which blog to post on ' 'in either brc.py or header of %s.' % handler.filename) sys.exit(1) self.auth() kind = post['kind'] title = post['title'] if kind == 'post': wpost = WordPressPost() else: wpost = WordPressPage() wpost.title = title wpost.content = post['content'] wpost.post_status = 'draft' if post['draft'] else 'publish' wpost.terms_names = { 'post_tag': post.get('labels', []), 'category': post.get('categories', []), } resp = {} if 'id' in post: print('Updating a %s: %s' % (kind, title)) self.service.call(posts.EditPost(post['id'], wpost)) else: print('Posting a new %s: %s' % (kind, title)) wpost.id = self.service.call(posts.NewPost(wpost)) wpost = self.service.call(posts.GetPost(wpost.id)) resp['id'] = wpost.id resp['url'] = wpost.link for k in ('service', 'blog', 'kind', 'draft'): resp[k] = post[k] handler.merge_header(resp) handler.write()
/tests/test_bpy_handlers_base.py
# Copyright (C) 2013, 2014 Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
from __future__ import unicode_literals import unittest from bpy.handlers.base import BaseHandler class Handler(BaseHandler): def _generate(self, source=None): return source class BaseHandlerTestCase(unittest.TestCase): def setUp(self): self.handler = Handler(None) def tearDown(self): self.handler = None def test_header_no_labels(self): handler = self.handler handler.source = '''!b post content''' header, markup = handler.split_header_markup() self.assertEqual(header, {}) self.assertEqual(markup, 'post content') def test_header_labels_none(self): handler = self.handler handler.source = '''!b labels: post content''' header, markup = handler.split_header_markup() self.assertEqual(header, {'labels': []}) self.assertEqual(markup, 'post content') def test_header_labels_single(self): handler = self.handler handler.source = '''!b labels: foobar post content''' header, markup = handler.split_header_markup() self.assertEqual(header, {'labels': ['foobar']}) self.assertEqual(markup, 'post content') def test_header_labels_two(self): handler = self.handler handler.source = '''!b labels: foo, bar post content''' header, markup = handler.split_header_markup() self.assertEqual(header, {'labels': ['foo', 'bar']}) self.assertEqual(markup, 'post content') def test_header_labels_with_empty_label(self): handler = self.handler handler.source = '''!b labels: foo, , bar post content''' header, markup = handler.split_header_markup() self.assertEqual(header, {'labels': ['foo', 'bar']}) self.assertEqual(markup, 'post content') # ===== def test_merge_header(self): handler = self.handler header = {'id': '123'} handler.header = header.copy() handler.merge_header(header.copy()) self.assertEqual(handler.header, header) header['id'] = '456' header['blah'] = 'lol' handler.merge_header(header.copy()) del header['blah'] self.assertEqual(handler.header, header) header['id'] = '789' uheader = {'id': '789'} handler.merge_header(uheader.copy()) self.assertEqual(handler.header, header) 
self.assertIsInstance(handler.header['id'], type('')) header['id'] = '123' uheader = {'id': '123'} handler.merge_header(uheader.copy()) self.assertEqual(handler.header, header) self.assertIsInstance(handler.header['id'], type('')) self.assertEqual(list(handler.header.keys()), ['id']) self.assertIsInstance(list(handler.header.keys())[0], type('')) handler.header = {} handler.merge_header(uheader.copy()) self.assertEqual(handler.header, header) self.assertIsInstance(handler.header['id'], type('')) self.assertEqual(list(handler.header.keys()), ['id']) self.assertIsInstance(list(handler.header.keys())[0], type('')) # ===== def test_id_affix(self): handler = self.handler handler.title = 'test' def test_header_override(): handler.header['id_affix'] = None self.assertEqual(handler.id_affix, None) handler.header['id_affix'] = '' self.assertEqual(handler.id_affix, '098f') handler.header['id_affix'] = 'prefix' self.assertEqual(handler.id_affix, 'prefix') # ----- self.assertEqual(handler.id_affix, None) # ----- handler.options['id_affix'] = None self.assertEqual(handler.id_affix, None) test_header_override() # ----- del handler.header['id_affix'] handler.options['id_affix'] = '' self.assertEqual(handler.id_affix, '098f') test_header_override() # ----- del handler.header['id_affix'] handler.options['id_affix'] = 'prefix' self.assertEqual(handler.id_affix, 'prefix') test_header_override() # ===== test_markup_affixes_EXPECT1 = 'prefix-content-suffix' test_markup_affixes_EXPECT2 = 'foobar' test_markup_affixes_EXPECT3 = 'title' def test_markup_affixes(self): handler = self.handler handler.title = 'title' handler.markup = 'content' handler.options['markup_prefix'] = 'prefix-' handler.options['markup_suffix'] = '-suffix' self.assertEqual( handler.generate(), self.test_markup_affixes_EXPECT1) self.assertEqual( handler.generate('foobar'), self.test_markup_affixes_EXPECT2) self.assertEqual( handler.generate_title(), self.test_markup_affixes_EXPECT3) # ===== def 
test_split_header_markup(self): handler = self.handler handler.source = '''xoxo !b oxox abc= foo def:bar post content''' header, markup = handler.split_header_markup() expect = {'abc': 'foo', 'def': 'bar'} self.assertEqual(header, expect) self.assertEqual(markup, 'post content') source = '%s!b\n' % handler.PREFIX_HEAD source += handler.HEADER_FMT % ('abc', 'foo') + '\n' source += handler.HEADER_FMT % ('def', 'bar') + '\n' if handler.PREFIX_END: source += handler.PREFIX_END + '\n' source += '\npost content' handler.source = source header, markup = handler.split_header_markup() self.assertEqual(header, expect) self.assertEqual(markup, 'post content') # ===== def test_generate_header(self): handler = self.handler handler.set_header('id', '123') expect = '%s!b\n%s\n' % (handler.PREFIX_HEAD, handler.HEADER_FMT % ('id', '123')) if handler.PREFIX_END: expect += handler.PREFIX_END + '\n' self.assertEqual(handler.generate_header(), expect) # ===== def test_generate_title_oneline(self): handler = self.handler title = 'foobar' expect = 'foobar' result = handler.generate_title(title) self.assertEqual(result, expect) def test_generate_title_multiline(self): handler = self.handler title = 'foo\nbar\n\nblah' expect = 'foo bar blah' result = handler.generate_title(title) self.assertEqual(result, expect) test_generate_title_common_markup_EXPECT = 'foo *bar*' def test_generate_title_common_markup(self): handler = self.handler title = 'foo *bar*' result = handler.generate_title(title) expect = self.test_generate_title_common_markup_EXPECT self.assertEqual(result, expect) # ===== test_generate_str_MARKUP = '\xc3\xa1' test_generate_str_EXPECT = '\xc3\xa1' def test_generate__str(self): handler = self.handler html = handler._generate(self.test_generate_str_MARKUP) self.assertEqual(html, self.test_generate_str_EXPECT) self.assertIsInstance(html, type('')) def test_generate_str(self): handler = self.handler handler.markup = self.test_generate_str_MARKUP html = handler.generate() 
self.assertEqual(html, self.test_generate_str_EXPECT) self.assertIsInstance(html, type('')) # ===== test_smartypants_MARKUP = 'foo "bar"' test_smartypants_EXPECT = 'foo &#8220;bar&#8221;' def test_smartypants(self): handler = self.handler handler.options['smartypants'] = True handler.markup = self.test_smartypants_MARKUP html = handler.generate() self.assertEqual(html, self.test_smartypants_EXPECT) self.assertIsInstance(html, type('')) # ===== def test_generate_post(self): handler = self.handler handler.source = '''!b abc=foo title=the title id=123 blog: 456 post content''' header, markup = handler.split_header_markup() handler.header = header post = handler.generate_post() self.assertEqual(post, { 'title': 'the title', 'draft': False, 'id': '123', 'blog': {'id': '456'} }) # ===== def test_update_source(self): handler = self.handler source = '%s!b\n%s\n' % (handler.PREFIX_HEAD, handler.HEADER_FMT % ('id', '123')) if handler.PREFIX_END: source += handler.PREFIX_END + '\n' source += '\npost content' handler.source = source header, markup = handler.split_header_markup() handler.header = header handler.markup = markup handler.update_source() self.assertEqual(handler.source, source) handler.options['markup_prefix'] = 'PREFIX' handler.update_source() self.assertEqual(handler.source, source) # ===== test_embed_images_src = 'tests/test.png' test_embed_images_data_URI = ( 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAI' 'AAACQd1PeAAAADElEQVQI12Oorq4GAALmAXLRBAkWAAAAAElFTkSuQmCC' ) test_embed_images_SOURCE1 = '<img src="http://example.com/example.png"/>' test_embed_images_EXPECT1 = test_embed_images_SOURCE1 test_embed_images_SOURCE2 = '<img src="tests/test.png"/>' test_embed_images_EXPECT2 = '<img src="%s"/>' % test_embed_images_data_URI test_embed_images_SOURCE3 = '<img alt="foo" src="tests/test.png"/>' test_embed_images_EXPECT3 = '<img alt="foo" src="%s"/>' % ( test_embed_images_data_URI) test_embed_images_SOURCE4 = '<img src="tests/test.png" title="bar"/>' 
test_embed_images_EXPECT4 = '<img src="%s" title="bar"/>' % ( test_embed_images_data_URI) test_embed_images_SOURCE5 = '<img src="%s"/>' % test_embed_images_data_URI test_embed_images_EXPECT5 = test_embed_images_SOURCE5 def test_embed_images(self): handler = self.handler result = handler.embed_images(self.test_embed_images_SOURCE1) self.assertEqual(result, self.test_embed_images_EXPECT1) result = handler.embed_images(self.test_embed_images_SOURCE2) self.assertEqual(result, self.test_embed_images_EXPECT2) result = handler.embed_images(self.test_embed_images_SOURCE3) self.assertEqual(result, self.test_embed_images_EXPECT3) result = handler.embed_images(self.test_embed_images_SOURCE4) self.assertEqual(result, self.test_embed_images_EXPECT4) result = handler.embed_images(self.test_embed_images_SOURCE5) self.assertEqual(result, self.test_embed_images_EXPECT5) test_embed_images_generate_SOURCE = '<img src="tests/test.png"/>' test_embed_images_generate_EXPECT = '<img src="%s"/>' % ( test_embed_images_data_URI) def test_embed_images_generate(self): handler = self.handler handler.options['embed_images'] = True handler.markup = self.test_embed_images_generate_SOURCE html = handler.generate() self.assertEqual(html, self.test_embed_images_generate_EXPECT)
/tests/test_bpy_handlers_mkd.py
# Copyright (C) 2013, 2014 Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import unicode_literals import unittest import test_bpy_handlers_base as test_base from bpy.handlers.mkd import Handler class HandlerTestCase(test_base.BaseHandlerTestCase): def setUp(self): self.handler = Handler(None) # ===== test_markup_affixes_EXPECT1 = '<p>prefix-content-suffix</p>' test_markup_affixes_EXPECT2 = '<p>foobar</p>' # ===== test_generate_title_common_markup_EXPECT = 'foo <em>bar</em>' # ===== test_generate_str_EXPECT = '<p>\xc3\xa1</p>' # ===== test_smartypants_EXPECT = '<p>foo &#8220;bar&#8221;</p>' # ===== @unittest.skip('tested in BaseHandler') def test_embed_images(self): pass test_embed_images_generate_SOURCE = '![tests/test.png](tests/test.png)' test_embed_images_generate_EXPECT = ( '<p><img alt="tests/test.png" src="%s" /></p>' % ( test_base.BaseHandlerTestCase.test_embed_images_data_URI ) )
/tests/test_bpy_handlers_rst.py
# Copyright (C) 2013, 2014 Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import unicode_literals import unittest from docutils import nodes from docutils.parsers.rst import Directive import test_bpy_handlers_base as test_base from bpy.handlers.rst import Handler, register_directive, register_role class HandlerTestCase(test_base.BaseHandlerTestCase): def setUp(self): self.handler = Handler(None) # ===== def test_options_register_directive_decorator(self): source = '.. 
dtestdir::' expect = '<p>TEST</p>' @register_directive('dtestdir') class dTestDir(Directive): def run(self): return [nodes.raw('', expect, format='html')] handler = Handler(None) self.assertEqual(handler.generate(source), expect) def test_options_register_role_decorator(self): source = 'abc :dtestrole:`123` def' expect = '<p>abc <em>TEST</em> def</p>' @register_role('dtestrole') def dTestRole(*args, **kwds): return [nodes.raw('', '<em>TEST</em>', format='html')], [] handler = Handler(None) self.assertEqual(handler.generate(source), expect) def test_options_register_directives(self): source = '.. testdir::' expect = '<p>TEST</p>' class TestDir(Directive): def run(self): return [nodes.raw('', expect, format='html')] options = {'register_directives': {'testdir': TestDir}} handler = Handler(None, options) self.assertEqual(handler.generate(source), expect) def test_options_register_roles(self): source = 'abc :testrole:`123` def' expect = '<p>abc <em>TEST</em> def</p>' def TestRole(*args, **kwds): return [nodes.raw('', '<em>TEST</em>', format='html')], [] options = {'register_roles': {'testrole': TestRole}} handler = Handler(None, options) self.assertEqual(handler.generate(source), expect) # ===== def test_id_affix(self): handler = self.handler handler.title = 'test' source = ('Test Handler\n' '------------') html_base = ('<div class="section" id="%stest-handler">\n' '<h2>Test Handler</h2>\n' '</div>') html = html_base % '' self.assertEqual(handler.generate(source), html) handler.header['id_affix'] = '' html = html_base % '098f-' self.assertEqual(handler.generate(source), html) self.assertEqual(handler.modified, True) self.assertEqual(handler.generate_header(), '''.. 
!b id_affix: 098f ''') handler.header['id_affix'] = 'foobar-prefix' html = html_base % 'foobar-prefix-' self.assertEqual(handler.generate(source), html) # ===== test_markup_affixes_EXPECT1 = '<p>prefix-content-suffix</p>' test_markup_affixes_EXPECT2 = '<p>foobar</p>' # ===== test_generate_title_common_markup_EXPECT = 'foo <em>bar</em>' # ===== test_generate_str_EXPECT = '<p>\xc3\xa1</p>' # ===== test_smartypants_EXPECT = '<p>foo &#8220;bar&#8221;</p>' # ===== @unittest.skip('tested in BaseHandler') def test_embed_images(self): pass test_embed_images_generate_SOURCE = '.. image:: tests/test.png' test_embed_images_generate_EXPECT = ( '<img alt="tests/test.png" src="%s" />' % ( test_base.BaseHandlerTestCase.test_embed_images_data_URI ) )
/tests/test_bpy_handlers_text.py
# Copyright (C) 2013, 2014 Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import test_bpy_handlers_base as test_base
from bpy.handlers.text import Handler


class HandlerTestCase(test_base.BaseHandlerTestCase):
    """Runs the shared BaseHandler suite against the plain-text handler,
    plus pre_wrap variants and the embed-images error path."""

    def setUp(self):
        self.handler = Handler(None)

    # =====
    # The pre_wrap option must not change how titles are generated; each
    # variant just re-runs the corresponding base test with it enabled.
    def test_generate_title_pre_wrap_oneline(self):
        handler = self.handler
        handler.options['pre_wrap'] = True
        super(HandlerTestCase, self).test_generate_title_oneline()

    def test_generate_pre_wrap_multiline(self):
        handler = self.handler
        handler.options['pre_wrap'] = True
        super(HandlerTestCase, self).test_generate_title_multiline()

    def test_generate_pre_wrap_common_markup(self):
        handler = self.handler
        handler.options['pre_wrap'] = True
        super(HandlerTestCase, self).test_generate_title_common_markup()

    # =====
    def test_embed_images(self):
        handler = self.handler
        # image embedding is unsupported for plain text and must raise
        self.assertRaises(RuntimeError, handler.embed_images, ('', ))

    def test_embed_images_generate(self):
        handler = self.handler
        handler.options['embed_images'] = True
        handler.markup = '<img src="http://example.com/example.png"/>'
        html = handler.generate()
        # plain-text output HTML-escapes the markup instead of embedding
        EXPECT = '&lt;img src="http://example.com/example.png"/&gt;'
        self.assertEqual(html, EXPECT)
/tests/test_setup.py
# Copyright (C) 2013 by Yu-Jie Lin # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import unittest from docutils.core import publish_string class SetupTestCase(unittest.TestCase): def test_long_description(self): """Ensure long description can be generated""" with open('README.rst') as f: long_description = f.read() overrides = { # raises exception at warning level (2) 'halt_level': 2, 'raw_enabled': False, } html = publish_string(long_description, writer_name='html', settings_overrides=overrides) self.assertTrue(html)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Ashafix/argparse2HTML
refs/heads/master
{"/blueprints/celery_task.py": ["/args.py"], "/blueprints/subprocess_task.py": ["/args.py"], "/server.py": ["/argparse2dict.py"], "/args.py": ["/example_argparser.py"]}
└── ├── argparse2dict.py ├── args.py ├── blueprints │ ├── celery_task.py │ └── subprocess_task.py ├── config.py ├── example_argparser.py └── server.py
/argparse2dict.py
import argparse
from collections import OrderedDict


def argparser_to_dict(parser):
    """Convert an ArgumentParser from the argparse module to a dictionary.

    :param parser: ArgumentParser, argparser which should be converted
                   to a dictionary
    :return: OrderedDict, key: action.dest, value: dict of the action's
             public attributes; the ``type`` callable is reduced to its
             bare name (e.g. ``'int'``) and a ``None`` default is dropped
    """
    # help/version actions carry no user data, so leave them out
    skipped = (argparse._HelpAction, argparse._VersionAction)
    actions = [a for a in parser._actions if type(a) not in skipped]

    arg_dict = OrderedDict()
    for action in actions:
        # getattr instead of __getattribute__; 'container' is a back
        # reference to the parser and is not serializable/useful here
        info = {k: getattr(action, k)
                for k in dir(action)
                if not k.startswith('_') and k != 'container'}

        # str(int) == "<class 'int'>" -> keep just the bare name 'int'
        type_ = info.get('type')
        if type_ is not None:
            type_ = str(type_)
            if type_.startswith('<class') and "'" in type_:
                info['type'] = type_.split("'")[1]

        # a None default carries no information for rendering; drop it
        if info.get('default', False) is None:
            del info['default']

        arg_dict[action.dest] = info
    return arg_dict
/args.py
from collections import namedtuple

from example_argparser import parser1, simple_math
from example_argparser import parser_cycler, cycler
from example_argparser import parser_wrapper, wrapper

# A registered command-line interface: its prog name, the argparse
# parser that describes it, and the callable implementing it.
Parser = namedtuple('CLI', ('name', 'parser', 'function'))

parsers = [
    Parser(name=p.prog, parser=p, function=f)
    for p, f in ((parser1, simple_math),
                 (parser_cycler, cycler),
                 (parser_wrapper, wrapper))
]


def get_cli(cmd):
    """Look up the registered CLI whose prog name equals *cmd*.

    Returns ``(True, cli)`` on a unique match, ``(False, None)`` when the
    name is unknown, and ``(False, (500, message))`` when more than one
    parser shares the name.
    """
    matches = [cli for cli in parsers if cli.name == cmd]
    if not matches:
        return False, None
    if len(matches) > 1:
        return False, (500, "more than one parser with prog name '{}' found ".format(cmd))
    return True, matches[0]
/blueprints/celery_task.py
import os
import logging
import contextlib
import dill
import binascii

from flask import request, jsonify, Response
from flask import Blueprint, redirect, url_for
from celery import Celery

from args import get_cli
from config import JOB_FOLDER, CELERY_BROKER_URL, RESULT_BACKEND

# Celery application shared by the web process and the worker.
celery = Celery('argparser_server', broker=CELERY_BROKER_URL, backend=RESULT_BACKEND)

# Per-task log file; the task id fills the placeholder.
FILENAME_LOG = os.path.join(JOB_FOLDER, '{}.log')

app_with_celery = Blueprint('app_with_celery', __name__, template_folder='templates')


@celery.task(name='server.background_task', bind=True)
def background_task(self, function, args):
    """Run a dill-serialized function in the worker.

    ``function`` and ``args`` are base64-encoded dill pickles (produced
    by run_post below).  stdout is redirected into a logger writing to
    this task's log file, so print() output becomes streamable progress.
    """
    logger = logging.getLogger(self.request.id)
    filehandler = logging.FileHandler(FILENAME_LOG.format(self.request.id))
    logger.addHandler(filehandler)
    # NOTE(review): dill.loads executes arbitrary code from the payload;
    # safe only because payloads originate from run_post in this process.
    function = dill.loads(binascii.a2b_base64(function))
    args = dill.loads(binascii.a2b_base64(args))
    with contextlib.redirect_stdout(LoggerWriter(logger, 20)):
        return function(args)


@app_with_celery.route('/run/<command>', methods=['POST'])
def run_post(command):
    """Start *command* as a Celery task; request body is a JSON list of
    CLI arguments.

    Returns the Celery task id, or redirects to the command list when
    the command is unknown.
    """
    params = request.get_json()
    found, cli = get_cli(command)
    if not found:
        return redirect(url_for('list_available_commands'))
    # dill (not pickle) so the CLI function and parsed Namespace survive
    # the trip through the broker as base64 text
    func = binascii.b2a_base64(dill.dumps(cli.function)).decode()
    args = cli.parser.parse_args(params)
    base64_args = binascii.b2a_base64(dill.dumps(args)).decode()
    task = background_task.apply_async(args=(func, base64_args))
    return task.id


@app_with_celery.route('/status/<task_id>')
def task_status(task_id):
    """Report task status.

    With an ``Accept: text/event-stream`` header the task's log file is
    streamed as server-sent events until the task reaches a final state;
    otherwise the JSON result (on success) or the status string is
    returned.
    """
    task = background_task.AsyncResult(task_id)
    if request.headers.get('accept') == 'text/event-stream':
        def status():
            # NOTE(review): busy-polls the task state with no sleep
            # between iterations -- consider adding a small delay
            while task.status not in ('SUCCESS', 'FAILURE', 'REVOKED'):
                fname = FILENAME_LOG.format(task_id)
                resp = ['data: \n']
                if os.path.isfile(fname):
                    with open(fname, 'r') as f:
                        resp = ["data: {}".format(line.strip()) for line in f.readlines()]
                resp.append('\n')
                yield '\n'.join(resp)
            yield "data: \n\n"
        return Response(status(), content_type='text/event-stream')
    if task.status == 'SUCCESS':
        return jsonify(task.result)
    return jsonify(task.status)
class LoggerWriter:
    """Minimal file-like object that routes write() calls to a logger.

    Installed as a stdout replacement so print() output from a running
    task ends up in that task's log file.
    """

    def __init__(self, logger, level):
        self.logger = logger
        # NOTE(review): `level` is stored but write() logs at CRITICAL,
        # presumably so records clear the default WARNING threshold of a
        # freshly created logger -- confirm before changing.
        self.level = level

    def write(self, message):
        # print() emits bare newline separators/terminators; skip those
        if message == '\n':
            return
        self.logger.log(logging.CRITICAL, message)

    def flush(self, *kargs, **kwargs):
        # nothing is buffered; present only for file-object compatibility
        pass
/blueprints/subprocess_task.py
import sys
import os
import uuid
import psutil
import subprocess

from flask import request, jsonify, Response
from flask import Blueprint, redirect, url_for

from args import get_cli
from config import JOB_FOLDER

# Per-task log and pid files; the task id fills the placeholder.
FILENAME_LOG = os.path.join(JOB_FOLDER, '{}.log')
FILENAME_PID = os.path.join(JOB_FOLDER, '{}.pid')
# Marker line separating the command's stdout from its return value.
SEP = '=-' * 30

app_with_subprocess = Blueprint('app_with_subprocess ', __name__, template_folder='templates')


@app_with_subprocess.route('/run/<command>', methods=['POST'])
def run_post(command):
    """Run *command* in a detached subprocess; request body is a JSON
    list of CLI arguments.

    Builds a one-line Python program that imports the CLI function,
    rebuilds the parsed Namespace, calls the function, prints SEP and
    then the return value.  stdout/stderr go to the task's log file and
    the pid is recorded so /status can poll the process.  Returns the
    generated task id.
    """
    params = request.get_json()
    found, cli = get_cli(command)
    if not found:
        return redirect(url_for('list_available_commands'))
    args = cli.parser.parse_args(params)
    # NOTE(review): args.__dict__ is interpolated into source code;
    # values containing quotes/braces could break or inject into the
    # child program -- confirm inputs are trusted.
    code = 'from {module} import {function}; import argparse; args = argparse.Namespace(**{args}); r = ({function}(args)); print(\'{SEP}\'); print(r)'.format(
        module=cli.function.__module__,
        function=cli.function.__name__,
        SEP=SEP,
        args=args.__dict__
    )
    task_id = str(uuid.uuid4())
    # the log file handle stays open for the child's lifetime
    # (-u / bufsize=0: unbuffered so progress is visible immediately)
    f = open(FILENAME_LOG.format(task_id), 'w+')
    p = subprocess.Popen([sys.executable, '-u', '-c', code], stderr=f, stdout=f, bufsize=0)
    with open(FILENAME_PID.format(task_id), 'w') as ff:
        ff.write(str(p.pid))
    return str(task_id)


@app_with_subprocess.route('/status/<task_id>')
def task_status(task_id):
    """Report task progress.

    With an ``Accept: text/event-stream`` header the log file is
    streamed as server-sent events while the recorded pid is running.
    Otherwise the log's tail after the last SEP line (the command's
    printed return value) is returned as JSON.
    """
    try:
        with open(FILENAME_PID.format(task_id), 'r') as f:
            pid = int(f.read())
        process = psutil.Process(pid)
    except (FileNotFoundError, psutil.NoSuchProcess, psutil.AccessDenied):
        # unknown task id, already-finished process, or inaccessible pid
        process = None
    fname = FILENAME_LOG.format(task_id)
    if request.headers.get('accept') == 'text/event-stream':
        def status():
            # stream log lines only while the child process is alive
            while process is not None:
                try:
                    process_running = process.status() == psutil.STATUS_RUNNING
                except psutil.NoSuchProcess:
                    process_running = False
                if not process_running:
                    break
                try:
                    with open(fname, 'r') as f:
                        resp = ["data: {}".format(line.strip()) for line in f.readlines()]
                except (FileNotFoundError, IOError):
                    resp = ['data: \n']
                resp.append('\n')
                yield '\n'.join(resp)
            yield 'data: \n\n'
        return Response(status(), content_type='text/event-stream')
    try:
        with open(fname, 'r') as f:
            lines = f.readlines()
        # scan backwards for the SEP marker; everything after it is the
        # function's printed return value
        i = len(lines) - 1
        while i >= 0:
            if lines[i].strip() == SEP:
                break
            i -= 1
        if len(lines) > 0 and i >= 0:
            resp = '\n'.join([line.strip() for line in lines[i + 1:]])
        else:
            resp = ''
    except (FileNotFoundError, IOError):
        resp = ''
    return jsonify(resp)
/config.py
import os

# Celery transport/result settings (used only when USE_CELERY is True).
CELERY_BROKER_URL = 'redis://localhost:6379/0'
RESULT_BACKEND = 'redis://localhost:6379/0'

# Run tasks through Celery (True) or plain subprocesses (False).
USE_CELERY = False

# Port the Flask server listens on.
SERVER_PORT = 5000

# Directory for per-task log/pid files, relative to the process's
# working directory; created as a side effect of importing this module.
JOB_FOLDER = os.path.join(os.getcwd(), 'jobs')
os.makedirs(JOB_FOLDER, exist_ok=True)
/example_argparser.py
import argparse
import itertools
import time
import builtins

# --- demo parsers registered by args.py --------------------------------

parser1 = argparse.ArgumentParser(prog='simple math', description='a simple math command line interface')
parser1.add_argument('x', help='first value', default='2', type=int)
parser1.add_argument('y', help='second value', default='3', type=int)
parser1.add_argument('--action', help='which method to apply', default='min', choices=['min', 'max', 'sum'], type=str)

parser_cycler = argparse.ArgumentParser(prog='cycler')
parser_cycler.add_argument('--delay', help='delay in seconds', default=0.1, type=float)
parser_cycler.add_argument('--max', help='Number of iterations', default=20, type=int)
parser_cycler.add_argument('--characters', help='Cycle through those characters', default='\\|/-', type=str)

parser_wrapper = argparse.ArgumentParser(prog='wrapper')
parser_wrapper.add_argument('columns', help='List of comma separated column names', default='a,b,c', type=str)
parser_wrapper.add_argument('values', help='List of comma separated values', default='1,2,3', type=str)


def simple_math(args):
    """Apply the builtin named by ``args.action`` to ``[args.x, args.y]``."""
    operation = getattr(builtins, args.action)
    return operation([args.x, args.y])


def cycler(args):
    """Print one character per step, pausing ``args.delay`` seconds,
    until ``args.max`` iterations have elapsed."""
    for count, symbol in enumerate(itertools.cycle(args.characters)):
        print(symbol)
        time.sleep(args.delay)
        if count >= args.max:
            break
    return 'Finished after {} iterations'.format(count)


def wrapper(args):
    """Split the comma separated inputs and delegate to complex_function."""
    return complex_function(args.columns.split(','), args.values.split(','))


def complex_function(columns, values):
    """Pair each column name with its value, one 'col: val' per line."""
    paired = ['{}: {}'.format(name, values[pos]) for pos, name in enumerate(columns)]
    return '\n'.join(paired)
/server.py
import os
from collections import OrderedDict
import importlib
import socket

from flask import Flask, request, url_for, redirect
import jinja2

import args
from argparse2dict import argparser_to_dict
from config import CELERY_BROKER_URL, RESULT_BACKEND, USE_CELERY, SERVER_PORT

app = Flask(__name__)

# Select the task backend at import time: Celery worker or subprocesses.
if USE_CELERY:
    from blueprints.celery_task import app_with_celery
    app.config['CELERY_BROKER_URL'] = CELERY_BROKER_URL
    app.config['result_backend'] = RESULT_BACKEND
    app.config['task_track_started'] = True
    app.config['worker_redirect_stdouts'] = False
    app.config['worker_hijack_root_logger'] = False
    app.register_blueprint(app_with_celery)
else:
    from blueprints.subprocess_task import app_with_subprocess
    app.register_blueprint(app_with_subprocess)

TEMPLATE_FOLDER = './templates'
TEMPLATE_FILE = "default_template.html"
# NOTE(review): gethostbyname(gethostname()) may resolve to 127.0.0.1
# depending on the hosts file; links built from it are then local-only.
SERVER_NAME = socket.gethostbyname(socket.gethostname())

# a dedicated jinja2 environment (not Flask's) so templates can live in
# TEMPLATE_FOLDER relative to the working directory
template_loader = jinja2.FileSystemLoader(searchpath=TEMPLATE_FOLDER)
template_env = jinja2.Environment(loader=template_loader)


@app.route('/')
def show_command_line_options():
    """Render the HTML form for the command given as ``?command=...``.

    Uses a command-specific '<command>.html' template when one exists,
    otherwise the generic template; unknown commands redirect to the
    command list.
    """
    cmd = request.args.get('command')
    found, cli = args.get_cli(cmd)
    if not found:
        return redirect(url_for('list_available_commands'))
    parser = cli.parser
    if os.path.isfile(os.path.join(TEMPLATE_FOLDER, '{}.html'.format(cmd))):
        template_file = '{}.html'.format(cmd)
    else:
        template_file = TEMPLATE_FILE
    template = template_env.get_template(template_file)
    server = 'http://{}:{}/run/{}'.format(SERVER_NAME, SERVER_PORT, cmd)
    return template.render(title=cli.name,
                           description=cli.parser.description,
                           args=argparser_to_dict(parser),
                           server=server,
                           css_url=url_for('static', filename='css/main.css'))


@app.route('/list')
def list_available_commands():
    """List every registered command, linking each to its form page."""
    template = template_env.get_template('list_commands.html')
    cmds = {parser.name: 'http://{}:{}/?command={}'.format(SERVER_NAME, SERVER_PORT, parser.name)
            for parser in args.parsers}
    # render in deterministic alphabetical order
    cmds_sorted = OrderedDict()
    for cmd in sorted(cmds.keys()):
        cmds_sorted[cmd] = cmds[cmd]
    return template.render(args=cmds_sorted,
                           css_url=url_for('static', filename='css/main.css'))


@app.route('/refresh')
def refresh():
    """Re-import the args module to pick up newly added parsers."""
    importlib.reload(args)
    return 'refreshed argparsers'


if __name__ == '__main__':
    app.run(threaded=True, host='0.0.0.0', port=SERVER_PORT)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
marvin939/ZombiePygame
refs/heads/master
{"/tests/test_effects.py": ["/game.py", "/effects.py"], "/tests/test_projectile.py": ["/weapon.py"], "/tests/test_warhead.py": ["/weapon.py"], "/demo/demo_projectile.py": ["/weapon.py"], "/demo/demo_weapon.py": ["/weapon.py", "/entity.py"], "/mobs.py": ["/weapon.py"], "/demo/demo_effects.py": ["/manager.py"], "/demo/demo_image_manager.py": ["/manager.py"], "/demo/demo_rotate_towards_mouse.py": ["/manager.py"], "/demo/demo_turret_rotate.py": ["/manager.py"], "/run.py": ["/manager.py"], "/tests/test_image_manager.py": ["/manager.py"], "/tests/test_mobs.py": ["/manager.py"], "/effects.py": ["/entity.py"]}
└── ├── demo │ ├── demo_effects.py │ ├── demo_image_manager.py │ ├── demo_projectile.py │ ├── demo_rotate_towards_mouse.py │ ├── demo_turret_rotate.py │ └── demo_weapon.py ├── effects.py ├── entity.py ├── game.py ├── manager.py ├── mobs.py ├── run.py ├── tests │ ├── test_effects.py │ ├── test_entity.py │ ├── test_image_manager.py │ ├── test_mobs.py │ ├── test_projectile.py │ ├── test_utilities.py │ ├── test_warhead.py │ ├── test_weapon.py │ └── test_world.py ├── utilities.py └── weapon.py
/demo/demo_effects.py
"""Demo: spawn bullet-travel and explosion effects with the mouse/keyboard.

LMB / hold RMB spawns a bullet-travel effect towards the cursor,
E spawns an explosion effect, I prints the world's entities.
"""
import time
import sys
import pygame
from game import *
from effects import *
from pygame.locals import *
from manager import ImageManager

GREEN = (0, 255, 0)
FPS = 30

image_dude = None  # module-level ImageManager, assigned in main()


def main():
    """Run the effects demo loop until the window is closed."""
    pygame.init()
    clock = pygame.time.Clock()
    screen = pygame.display.set_mode((640, 480))
    world = World()

    global image_dude
    image_dude = ImageManager('../data/images')

    time_passed = 0
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONDOWN:
                print(event)
                # BUG FIX: was `event.button is 1` — identity comparison with an
                # int literal only works by accident (CPython small-int caching).
                if event.button == 1:
                    spawn_effect(world)
                    print('fx added')
            elif event.type == KEYDOWN:
                if event.key == K_e:
                    spawn_explosion_effect(world)
                elif event.key == K_i:
                    # Show entities
                    print(world.entities.values())

        # Holding the right mouse button keeps spawning effects.
        if pygame.mouse.get_pressed()[2]:
            spawn_effect(world)

        world.process(time_passed)

        screen.fill((0, 0, 0))
        world.render(screen)
        pygame.display.update()

        time_passed = clock.tick(FPS)


def spawn_effect(world):
    """Spawn a bullet-travel effect from the screen centre to the mouse."""
    bullet_fx = BulletTravelEffect(world,
                                   Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2),
                                   Vector2(*pygame.mouse.get_pos()),
                                   GREEN, speed=500)
    world.add_entity(bullet_fx)


def spawn_explosion_effect(world):
    """Spawn an explosion effect at the current mouse position."""
    explosion = ExplosionEffect(world, Vector2(*pygame.mouse.get_pos()), 50, color=VIOLET)
    world.add_entity(explosion)


if __name__ == '__main__':
    main()
/demo/demo_image_manager.py
from manager import ImageManager
import sys
import os
import pygame
import time

'''No need to do; just change the working directory of the file @ Run->Edit Configurations...
Don't forget to change relative paths of instances (eg. ImageManager('../data/images/') to ImageManager('data/images/')'''


def main():
    """Display a scaled background image for two seconds, then exit."""
    pygame.init()
    SCREEN_WIDTH, SCREEN_HEIGHT = SCREEN_SIZE = (640, 480)
    window = pygame.display.set_mode(SCREEN_SIZE)
    pygame.display.set_caption('[Demo] ImageManager image loading')

    # Load, scale to fill the window, and re-register under the same key.
    loader = ImageManager('data/images')
    loader['backgroundB.jpg'] = pygame.transform.scale(loader['backgroundB.jpg'], SCREEN_SIZE)

    window.blit(loader['backgroundB.jpg'], (0, 0))
    pygame.display.update()

    time.sleep(2)
    pygame.quit()
    sys.exit()


if __name__ == '__main__':
    main()
/demo/demo_projectile.py
"""Demo: click (or hold LMB) to fire Projectile entities from the screen centre."""
import sys
import pygame
from pygame.math import Vector2
from game import *
from pygame.locals import *
from weapon import Projectile

pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Projectile object demonstration')
clock = pygame.time.Clock()
world = World()
CENTER_VEC = Vector2(SCREEN_CENTER)


def main():
    """Main demo loop: fire projectiles towards the mouse cursor."""
    time_passed = 0
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            elif event.type == MOUSEBUTTONDOWN:
                spawn_projectile(CENTER_VEC, event.pos)
                print(world.entity_count())

        lmb, mmb, rmb = pygame.mouse.get_pressed()
        if lmb:
            # BUG FIX: previously used `event.pos` here, outside the event
            # loop — NameError on the first frame when no event has arrived
            # yet, and a stale position otherwise. Query the live cursor.
            spawn_projectile(CENTER_VEC, pygame.mouse.get_pos())

        world.process(time_passed)
        screen.fill(BLACK)
        world.render(screen)
        pygame.display.update()
        time_passed = clock.tick(FPS)


def spawn_projectile(from_pos, to_pos):
    """Spawn a bullet travelling from from_pos towards to_pos."""
    direction = (Vector2(to_pos) - Vector2(from_pos)).normalize()
    print('dir', direction)
    proj = Projectile(world, 'bullet', None, CENTER_VEC, direction, max_distance=100)
    world.add_entity(proj)


def terminate():
    """Shut pygame down and exit the process."""
    pygame.quit()
    sys.exit()


if __name__ == '__main__':
    main()
/demo/demo_rotate_towards_mouse.py
import pygame
from manager import ImageManager
from game import *
from pygame.locals import *
from pygame.math import Vector2


def main():
    """Rotate a sprite at the screen centre so that it tracks the mouse cursor."""
    pygame.init()
    display = pygame.display.set_mode(SCREEN_SIZE)
    clock = pygame.time.Clock()

    images = ImageManager('../data/images')
    turret_surface = images['sentrygun.png']
    turret_position = Vector2(SCREEN_WIDTH / 2.0, SCREEN_HEIGHT / 2.0)
    click_marks = []
    print(turret_position)

    time_passed = 0
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                return

        display.fill((0, 0, 0))

        cursor = pygame.mouse.get_pos()
        # atan2 grows counter-clockwise while screen y grows downward,
        # hence the negation.
        offset = Vector2(cursor) - turret_position
        heading = -math.atan2(offset.y, offset.x)

        spun = pygame.transform.rotate(turret_surface, math.degrees(heading))
        spun_x = (SCREEN_WIDTH - spun.get_width()) / 2.0
        spun_y = (SCREEN_HEIGHT - spun.get_height()) / 2.0

        # Centre cross-hair for visual reference.
        pygame.draw.line(display, (255, 0, 0), (0, SCREEN_HEIGHT / 2), (SCREEN_WIDTH, SCREEN_HEIGHT / 2))
        pygame.draw.line(display, (255, 0, 0), (SCREEN_WIDTH / 2, 0), (SCREEN_WIDTH / 2, SCREEN_HEIGHT))

        # LMB drops a green marker at the cursor.
        if pygame.mouse.get_pressed()[0]:
            click_marks.append(cursor)
        for mark in click_marks:
            pygame.draw.circle(display, (0, 255, 0), mark, 5)

        # Unrotated sprite at the cursor, rotated sprite centred on screen.
        display.blit(turret_surface, cursor)
        display.blit(spun, (spun_x, spun_y))

        pygame.display.update()
        time_passed = clock.tick(FPS)


if __name__ == '__main__':
    main()
/demo/demo_turret_rotate.py
"""Demo: a SentryGun scanning for and attacking zombies spawned with the mouse."""
# BUG FIX: removed the accidental IDE-inserted import
# `from encodings.punycode import selective_find` — a stdlib-internal helper
# that was never used anywhere in this module.
import pygame
from manager import ImageManager
from game import *
from pygame.locals import *
from pygame.math import Vector2
from mobs import *

image_manager = None  # module-level ImageManager, assigned in main()


def main():
    """Run the turret demo; any mouse button spawns a zombie at the cursor."""
    pygame.init()
    screen = pygame.display.set_mode(SCREEN_SIZE)
    clock = pygame.time.Clock()

    global image_manager
    image_manager = ImageManager('../data/images')

    world = World()
    sentry_gun = SentryGun(world, image_manager['sentrygun.png'],
                           Vector2(SCREEN_WIDTH / 2.0, SCREEN_HEIGHT / 2.0))
    world.add_entity(sentry_gun)

    time_passed = 0
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                return

        screen.fill((0, 0, 0))
        world.process(time_passed)

        mouse_x, mouse_y = mouse_pos = pygame.mouse.get_pos()
        mouse_location = Vector2(mouse_pos)

        if any(pygame.mouse.get_pressed()):
            spawn_zombie(world, mouse_location)

        # Draw center cross-hair lines:
        pygame.draw.line(screen, (255, 0, 0), (0, SCREEN_HEIGHT / 2), (SCREEN_WIDTH, SCREEN_HEIGHT / 2))
        pygame.draw.line(screen, (255, 0, 0), (SCREEN_WIDTH / 2, 0), (SCREEN_WIDTH / 2, SCREEN_HEIGHT))

        world.render(screen)
        pygame.display.update()
        time_passed = clock.tick(FPS)


def spawn_zombie(world, mouse_location):
    """Spawn a zombie at the given location and report the entity count."""
    zombie = Zombie(world, image_manager['zombie.png'], mouse_location)
    world.add_entity(zombie)
    print('There are {} entities in this world.'.format(len(world.entities.keys())))


if __name__ == '__main__':
    main()
/demo/demo_weapon.py
import sys
import pygame
from pygame.math import Vector2
from game import *
from pygame.locals import *
from weapon import Projectile, WeaponSimplified
from entity import GameEntity
import utilities

pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Projectile object demonstration')
clock = pygame.time.Clock()
world = World()
CENTER_VEC = Vector2(SCREEN_CENTER)
AMMO = 10000
SPREAD = 10
FIRE_RATE = 10


def main():
    """Weapon demo: hold any mouse button to fire, press R to refill ammo."""
    time_passed = 0

    player = GameEntity(world, 'player', None, CENTER_VEC)
    world.add_entity(player)
    weapon = WeaponSimplified(world, player, FIRE_RATE, 0, AMMO, spread=SPREAD)

    ready2fire_surf = pygame.Surface((32, 32))
    font_obj = pygame.font.SysFont('freesans', 32)

    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                terminate()
            elif event.type == MOUSEBUTTONDOWN:
                pass
            elif event.type == MOUSEMOTION:
                # Keep the player aimed at the cursor.
                player.angle = GameEntity.get_angle(player.location, Vector2(event.pos))
            elif event.type == KEYDOWN:
                if event.key == K_r:
                    weapon.ammo = AMMO

        seconds_passed = time_passed / 1000
        if any(pygame.mouse.get_pressed()):
            weapon.fire()

        world.process(time_passed)
        weapon.process(seconds_passed)

        screen.fill(BLACK)
        world.render(screen)

        # Readiness indicator: green square + "ready", red square + "loading".
        is_ready = weapon.ready_to_fire
        ready2fire_surf.fill(GREEN if is_ready else RED)
        screen.blit(ready2fire_surf, (0, 0))
        label = font_obj.render('ready' if is_ready else 'loading', True, WHITE)
        screen.blit(label, (32, 0))

        pygame.display.set_caption('Weapon demo; Ammo: {ammo}'.format(ammo=weapon.ammo))
        pygame.display.update()
        time_passed = clock.tick(FPS)


def terminate():
    """Shut pygame down and exit the process."""
    pygame.quit()
    sys.exit()


if __name__ == '__main__':
    main()
/effects.py
"""This is where effects go. eg. Explosions, bullet effects, etc. that disappear in time""" from entity import GameEntity from game import * import math class BulletTravelEffect(GameEntity): def __init__(self, world, origin, destination, color=YELLOW, speed=1000, length=50, duration=math.inf): super().__init__(world, 'bullet_travel', None, origin, destination) self.color = color self.DURATION = duration self.remaining_time = self.DURATION self.fx_head = Vector2(self.location) self.fx_tail = Vector2(self.location) self.fx_length = length self.fx_heading = (self.destination - self.location).normalize() self.fx_speed = speed self.stop_fx_head = False @property def fx_speed(self): return self.speed @fx_speed.setter def fx_speed(self, new_value): self.speed = new_value def process(self, seconds_passed): if self.fx_head != self.destination: head_to_destination_vec = self.destination - self.fx_head head_heading = head_to_destination_vec.normalize() distance = min(self.speed * seconds_passed, head_to_destination_vec.length()) self.fx_head += head_heading * distance if self.fx_tail != self.destination and (self.fx_head.distance_to(self.location) >= self.fx_length or self.fx_head == self.destination): tail_to_destination_vec = self.destination - self.fx_tail tail_heading = tail_to_destination_vec.normalize() distance = min(tail_to_destination_vec.length(), self.speed * seconds_passed) self.fx_tail += tail_heading * distance self.remaining_time -= seconds_passed if self.remaining_time <= 0 or (self.fx_tail == self.fx_head == self.destination): self.world.remove_entity(self) def render(self, surface): pygame.draw.aaline(surface, self.color, self.fx_tail, self.fx_head) class ExplosionEffect(GameEntity): def __init__(self, world, location, radius, color=YELLOW): super().__init__(world, 'explosion_effect', None, location) if type(radius) not in (float, int): raise TypeError('radius argument must be a float or int!') if radius <= 0: raise ValueError('radius value must be greater 
than 0.') if type(color) not in (pygame.Color, tuple, list): raise TypeError('color argument must be type tuple or pygame.Color!') else: if type(color) in (tuple, list) and len(color) != 3: raise ValueError('color tuple/list must have 3 values (R, G, B)') self.RADIUS = radius self.radius = radius self.color = color # self.DURATION = duration # self.remaining_time = duration def process(self, seconds_passed): self.radius -= seconds_passed * self.RADIUS * 2 # if self.remaining_time <= 0 or self.radius <= 0: if self.radius <= 0: self.world.remove_entity(self) return #self.remaining_time -= seconds_passed def render(self, surface): print('surface:', surface) print('color:', self.color) print('location:', self.location) print('radius:', self.radius) x = int(self.location.x) y = int(self.location.y) pygame.draw.circle(surface, self.color, (x, y), int(self.radius)) #pygame.draw.circle(surface, self.color, self.location, int(self.radius)) #pygame.draw.circle() class ShockwaveEffect(GameEntity): pass
/entity.py
from pygame.math import Vector2
import math
import pygame
import utilities


class GameEntity:
    """GameEntity that has states"""

    def __init__(self, world, name, image, location=None, destination=None, speed=0):
        self.world = world
        self.name = name
        self.image = image
        self.location = Vector2(location) if location is not None else Vector2(0, 0)
        self.destination = Vector2(destination) if destination is not None else Vector2(0, 0)
        self.speed = speed
        self.id = 0
        self.__angle = 0.0
        self.__rect = None  # represents the boundary rectangle
        self.__rect_offset = None
        self.render_offset = None  # how much to offset the image by (relative to location) when rendering to a surface

    @property
    def angle(self):
        # Angle is always normalised through utilities.unit_angle.
        return utilities.unit_angle(self.__angle)

    @angle.setter
    def angle(self, angle):
        self.__angle = utilities.unit_angle(angle)

    def render(self, surface):
        """Blit the entity's image centred on its location (plus render_offset)."""
        if self.image is None:
            return
        x, y = 0, 0
        if self.render_offset is not None:
            x = self.location.x + self.render_offset.x
            y = self.location.y + self.render_offset.y
        else:
            x, y = self.location
        w, h = self.image.get_size()
        surface.blit(self.image, (x - w / 2, y - h / 2))

    def process(self, seconds_passed):
        """Move towards destination at self.speed, never overshooting it."""
        if self.speed > 0 and self.location != self.destination:
            vec_to_destination = self.destination - self.location
            distance_to_destination = vec_to_destination.length()
            heading = vec_to_destination.normalize()
            travel_distance = min(distance_to_destination, seconds_passed * self.speed)
            self.location += travel_distance * heading

    def face_vector(self, vector):
        """Face the entity towards the vector's location, set the new angle, and return it"""
        # FIX: dropped an unused local (the difference vector was computed but never used).
        new_angle = self.get_angle(self.location, vector)
        self.angle = new_angle
        return new_angle

    def face_entity(self, entity):
        """Face the entity towards the other entity's location, set the new angle, and return it"""
        return self.face_vector(entity.location)

    @staticmethod
    def get_angle(vectora, vectorb):
        """Retrieve the angle (radians) between vectora and vectorb, where
        vectorb is the end point, and vectora, the starting point"""
        vec_diff = vectorb - vectora
        # atan2 is negated: screen y grows downward.
        return utilities.unit_angle(-math.atan2(vec_diff.y, vec_diff.x))

    def set_rect(self, rect, vec_offset=None):
        """Set an explicit boundary rect (and optionally its offset from location)."""
        self.__rect = rect
        if vec_offset is not None:
            self.__rect_offset = vec_offset

    def get_rect(self):
        """Return the boundary rect centred on the current location.

        Falls back to the image's rect when no explicit rect was set.
        NOTE(review): the fallback assumes self.image is not None — confirm callers.
        """
        if self.__rect is not None:
            new_rect = pygame.Rect(self.__rect)
            new_rect.center = self.location
            if self.__rect_offset is not None:
                new_rect.x += self.__rect_offset.x
                new_rect.y += self.__rect_offset.y
            return new_rect
        img_rect = self.image.get_rect()
        img_rect.center = self.location
        return img_rect

    @property
    def rect(self):
        return self.get_rect()


class SentientEntity(GameEntity):
    """GameEntity that has states, and is able to think..."""

    def __init__(self, world, name, image, location=None, destination=None, speed=0, friends=None, enemies=None):
        super().__init__(world, name, image, location, destination, speed)
        self.friends = friends
        self.enemies = enemies
        self.brain = StateMachine()

    def process(self, seconds_passed):
        self.brain.think()
        super().process(seconds_passed)

    def get_close_enemy(self, radius=100):
        """Return the first enemy entity within radius of this entity, or None."""
        for enemy in self.enemies:
            e = self.world.get_close_entity(enemy, self.location, radius)
            if e is not None:
                return e
        return None


class State:
    """Base class for a single state in a StateMachine; subclasses override the hooks."""

    def __init__(self, name):
        self.name = name

    def do_actions(self):
        pass

    def check_conditions(self):
        pass

    def entry_actions(self):
        pass

    def exit_actions(self):
        pass


class StateMachine:
    """Holds named states and runs the active one each think() tick."""

    def __init__(self):
        self.states = {}
        self.active_state = None

    def add_state(self, state):
        """Add a state to the internal dictionary"""
        self.states[state.name] = state

    def think(self):
        """Let the current state do its thing"""
        # Only continue if there is an active state
        if self.active_state is None:
            return
        # Perform the actions of the active state and check conditions
        self.active_state.do_actions()
        new_state_name = self.active_state.check_conditions()
        if new_state_name is not None:
            self.set_state(new_state_name)

    def set_state(self, new_state_name):
        """Change state machine's active state"""
        # perform any exit actions of the current state
        if self.active_state is not None:
            self.active_state.exit_actions()
        if new_state_name not in self.states:
            print('Warning! "{}" not in self.states...'.format(new_state_name))
            return
        # Switch state and perform entry actions of new state
        self.active_state = self.states[new_state_name]
        self.active_state.entry_actions()
/game.py
import copy
import math
import pygame
from pygame.math import Vector2

FPS = 60
SCREEN_WIDTH, SCREEN_HEIGHT = SCREEN_SIZE = (640, 480)
SCREEN_CENTER = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
TICK_SECOND = 1000 / FPS / 1000  # seconds represented by one frame tick

# Colors
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
VIOLET = (128, 0, 255)


class World:
    """Registry of all game entities; drives their processing and rendering."""

    def __init__(self):
        self.entities = {}   # entity id -> entity
        self.entity_id = 0   # next id to hand out
        self.background = pygame.Surface(SCREEN_SIZE)  # .convert()
        self.background.fill(BLACK, (0, 0, SCREEN_WIDTH, SCREEN_HEIGHT))

    def add_entity(self, entity):
        """Store an entity, give it an id and advance the current entity_id"""
        self.entities[self.entity_id] = entity
        entity.id = self.entity_id
        self.entity_id += 1

    def remove_entity(self, entity):
        """Remove an entity; unknown entities are ignored."""
        # FIX: idiomatic membership test (no `.keys()`).
        if entity.id in self.entities:
            del self.entities[entity.id]

    def get(self, entity_id):
        """Retrieve an entity by id, or None when the id is unknown."""
        # FIX: dict.get replaces the manual lookup-then-index pattern.
        return self.entities.get(entity_id)

    def process(self, time_passed):
        """Update every entity in the world"""
        seconds_passed = time_passed / 1000.0
        # Iterate over a shallow copy so entities may add/remove entities
        # while being processed.
        entities_copy = copy.copy(self.entities)
        for entity in entities_copy.values():
            entity.process(seconds_passed)

    def render(self, surface):
        """Draw the background and all the entities"""
        surface.blit(self.background, (0, 0))
        for entity in self.entities.values():
            entity.render(surface)

    def get_close_entity(self, name, location, radius=100):
        """Find an entity with the given name within radius of location, or None."""
        location = Vector2(*location)
        for entity in self.entities.values():
            if not entity.name == name:
                continue
            distance = location.distance_to(entity.location)
            if distance < radius:
                return entity
        return None

    def entities_with_name(self, name):
        """Return an iterator over all entities carrying the given name."""
        def is_entity(entity):
            return entity.name == name
        return filter(is_entity, self.entities.values())

    def entity_count(self):
        """Number of entities currently registered."""
        return len(self.entities)
/manager.py
import os
import pygame
from errors import *


class ImageManager:
    """The thing that manages images.

    Surfaces are loaded lazily on first access and cached by name.
    Requires an initialised display so images can be converted.
    """

    def __init__(self, dir='.'):
        self.image_directory = os.path.abspath(dir)
        self.surf_dict = {}
        if pygame.display.get_surface() is None:
            raise ScreenNotInitialized('ImageManager instances require a screen to be already initialised!')

    def __getitem__(self, item):
        """Return the cached surface for `item`, loading it on first access."""
        if item in self.surf_dict:
            return self.surf_dict[item]

        # Not cached yet — validate the key and load from disk.
        if not isinstance(item, str):
            raise TypeError('argument item ({}) must be str!'.format(type(item)))

        image_path = self.__get_image_path(item)
        if not os.path.exists(image_path):
            raise FileNotFoundError('Path: {}'.format(image_path))

        loaded = pygame.image.load(image_path).convert_alpha()
        self.surf_dict[item] = loaded
        return loaded

    def __get_image_path(self, image_name):
        """Join the managed directory with an image file name."""
        return os.path.join(self.image_directory, image_name)

    def __setitem__(self, image_name, surface):
        """Manually name an image surface (key-value pair)"""
        if not isinstance(surface, pygame.Surface):
            raise TypeError('surface argument ({}) must be a pygame.Surface type!'.format(surface))
        self.surf_dict[image_name] = surface
        return surface
/mobs.py
from random import randint
from entity import *
from game import *
from pygame.math import Vector2
import math
import utilities
from effects import *
from weapon import WeaponSimplified


class Zombie(SentientEntity):
    """A Zombie wandering aimlessly"""
    NAME = 'zombie'

    def __init__(self, world, image, location):
        super().__init__(world, self.NAME, image, location)
        self.brain.add_state(ZombieExploreState(self))
        self.brain.add_state(ZombieAttackState(self))
        self.brain.set_state('explore')
        self.MAX_HP = 80
        self.hp = self.MAX_HP
        self.speed = 50
        self.sight = 50  # radius in which the zombie notices survivors

    def process(self, seconds_passed):
        super().process(seconds_passed)
        # Take damage from any sentry-gun bullet overlapping this zombie.
        bullet_entity = self.world.get_close_entity('bullet', self.location, self.rect.width / 2)
        if bullet_entity is not None and bullet_entity.owner.name == SentryGun.NAME:
            self.hp -= bullet_entity.damage
            self.world.remove_entity(bullet_entity)
        if self.hp <= 0:
            self.world.remove_entity(self)

    def shot(self):
        pass


class ZombieExploreState(State):
    """Wander to random on-screen destinations until hurt."""

    def __init__(self, zombie):
        super().__init__('explore')
        self.entity = zombie

    def do_actions(self):
        # Change directions at least every 100th frame (on average).
        if randint(0, 100) == 1:
            self.random_destination()

    def check_conditions(self):
        # Getting hurt triggers the attack state.
        if self.entity.hp < self.entity.MAX_HP:
            return 'attack'
        return None

    def random_destination(self):
        """Pick a random destination fully inside the screen bounds."""
        lower_x_boundary = int(self.entity.image.get_width() / 2)
        lower_y_boundary = int(self.entity.image.get_height() / 2)
        upper_x_boundary = int(SCREEN_WIDTH - lower_x_boundary)
        upper_y_boundary = int(SCREEN_HEIGHT - lower_y_boundary)
        x = randint(lower_x_boundary, upper_x_boundary)
        y = randint(lower_y_boundary, upper_y_boundary)
        self.entity.destination = Vector2(x, y)


class ZombieAttackState(ZombieExploreState):
    """Select a random survivor to attack until either is dead."""

    def __init__(self, zombie):
        super().__init__(zombie)
        self.name = 'attack'
        self.zombie = zombie
        self.has_killed = False
        self.target = None
        self.original_speed = -1  # real speed captured in entry_actions
        self.reset_state()

    def entry_actions(self):
        self.original_speed = self.zombie.speed
        self.zombie.speed = 200
        self.acquire_target()

    def acquire_target(self):
        """Lock onto the closest survivor in sight, keeping any existing target."""
        if self.target is not None:
            return
        target = self.zombie.world.get_close_entity('survivor', self.zombie.location, radius=self.zombie.sight)
        if target is not None:
            self.target = target

    def do_actions(self):
        # Keep wandering until a target is found
        if self.target is None:
            if randint(1, 10) == 1:
                self.random_destination()
            self.acquire_target()
            return
        self.zombie.destination = self.target.location
        if self.zombie.location.distance_to(self.target.location) < 5:
            self.target.hp -= 1
            if self.target.hp <= 0:
                self.has_killed = True

    def check_conditions(self):
        if self.has_killed:
            return 'explore'
        return None

    def exit_actions(self):
        self.zombie.hp = self.zombie.MAX_HP  # replenish zombie health
        self.reset_state()

    def reset_state(self):
        self.zombie.speed = self.original_speed
        self.has_killed = False
        self.target = None


class Survivor(SentientEntity):
    """A survivor shooting at zombies"""
    NAME = 'survivor'

    def __init__(self, world, image, location):
        super().__init__(world, self.NAME, image, location)
        self.brain.add_state(SurvivorExploreState(self))
        self.brain.add_state(SurvivorPanicState(self))
        self.brain.set_state('explore')
        self.MAX_HP = 20
        self.hp = self.MAX_HP
        self.speed = 50

    def process(self, seconds_passed):
        super().process(seconds_passed)
        if self.hp <= 0:
            self.world.remove_entity(self)

    def shot(self):
        pass


class SurvivorExploreState(ZombieExploreState):
    """Wander until hurt while zombies exist, then panic."""

    def __init__(self, survivor):
        super().__init__(survivor)

    def do_actions(self):
        # Change directions at least every 100th frame
        if randint(0, 100) == 1:
            self.random_destination()

    def check_conditions(self):
        zombies = tuple(self.entity.world.entities_with_name('zombie'))
        if self.entity.hp < self.entity.MAX_HP and len(zombies) > 0:
            return 'panic'
        return None


class SurvivorPanicState(SurvivorExploreState):
    """Run around quickly until no zombies remain."""

    def __init__(self, survivor):
        super().__init__(survivor)
        self.name = 'panic'
        self.original_speed = self.entity.speed

    def entry_actions(self):
        self.original_speed = self.entity.speed
        self.entity.speed = 300

    def do_actions(self):
        # Change directions frequently
        if randint(0, 10) == 1:
            self.random_destination()

    def check_conditions(self):
        # Survivor should stop panicking once there are no more zombies...
        zombies = tuple(self.entity.world.entities_with_name('zombie'))
        if len(zombies) <= 0:
            return 'explore'
        return None

    def exit_actions(self):
        self.entity.speed = self.original_speed


class SentryGun(SentientEntity):
    """Stationary turret that scans a cone for zombies and fires at a target."""
    NAME = 'sentry_gun'

    def __init__(self, world, image, location):
        super().__init__(world, self.NAME, image, location)
        self.TURRET_ROTATION_RATE_DEGREES = 180
        self.turret_rotation_rate = math.radians(self.TURRET_ROTATION_RATE_DEGREES)  # radians per second
        self.__turret_angle = 0
        self.speed = 0
        self.target = None
        self.CONE_OF_VISION_DEGREES = 60
        self.cone_of_vision = math.radians(self.CONE_OF_VISION_DEGREES)  # radians
        self.brain.add_state(self.ScanEnvironment(self))
        self.brain.add_state(self.AttackTargetState(self))
        self.brain.set_state('scan')
        self.weapon = WeaponSimplified(self.world, self, 10, 10, math.inf, spread=10)

    def process(self, seconds_passed):
        super().process(seconds_passed)
        # With no target, keep sweeping the turret; otherwise run the weapon.
        if self.target is None:
            self.turret_angle += self.turret_rotation_rate * seconds_passed
            return
        self.weapon.process(seconds_passed)

    def render(self, surface):
        rotated_image = pygame.transform.rotate(self.image, math.degrees(self.turret_angle))
        x, y = self.location
        w, h = rotated_image.get_size()
        surface.blit(rotated_image, (x - w / 2, y - h / 2))
        if self.target is not None:
            pygame.draw.aaline(surface, VIOLET, self.location, self.target.location)

    def turret_face_entity(self, entity):
        """Point the turret directly at the given entity."""
        angle = SentientEntity.get_angle(self.location, entity.location)
        self.turret_angle = angle

    @property
    def turret_angle(self):
        return utilities.unit_angle(self.__turret_angle)

    @turret_angle.setter
    def turret_angle(self, angle):
        self.__turret_angle = utilities.unit_angle(angle)

    class ScanEnvironment(State):
        """Sweep the turret until a zombie enters the cone of vision."""

        def __init__(self, turret):
            super().__init__('scan')
            self.turret = turret

        def entry_actions(self):
            pass

        def check_conditions(self):
            """Scan surroundings by scanning all enemies around"""
            half_cone = self.turret.cone_of_vision / 2
            turret_angle = utilities.unit_angle(self.turret.turret_angle)

            def is_zombie(entity):
                return entity.name == 'zombie'

            zombies = filter(is_zombie, self.turret.world.entities.values())
            for zombie in zombies:
                angle = SentientEntity.get_angle(self.turret.location, zombie.location)
                if turret_angle - half_cone < angle <= turret_angle + half_cone:
                    self.turret.target = zombie
                    return 'attack'

    class AttackTargetState(State):
        """Track and fire at the current target until it dies or vanishes."""

        def __init__(self, turret):
            super().__init__('attack')
            self.turret = turret

        def do_actions(self):
            # Rotate towards the target, then fire.
            angle = SentientEntity.get_angle(self.turret.location, self.turret.target.location)
            self.turret.turret_angle = angle
            self.turret.weapon.fire()

        def check_conditions(self):
            # BUG FIX: the None guard used to run AFTER dereferencing
            # `target.hp`, so it could never prevent an AttributeError.
            # Check for None first.
            target = self.turret.target
            if target is not None and target.hp > 0:
                return
            return 'scan'

        def exit_actions(self):
            self.turret.target = None
/run.py
import pygame
from pygame.locals import *
from game import *
import sys
import mobs
from manager import ImageManager
from random import randint

image_dude = None  # module-level ImageManager, assigned in main()
TITLE = 'Zombie Defence v0.0.0'


def main():
    """Game entry point: set up the world and run the main loop."""
    pygame.init()
    screen = pygame.display.set_mode(SCREEN_SIZE)
    pygame.display.set_caption(TITLE)
    clock = pygame.time.Clock()
    world = World()

    global image_dude
    image_dude = ImageManager('data/images/')

    setup_world(world)

    time_passed = 0
    while True:
        if time_passed > 0:
            pygame.display.set_caption('{title} {fps:>.0f} FPS'.format(title=TITLE, fps=1000 / time_passed))

        for event in pygame.event.get():
            if event.type == QUIT:
                quit_game()

        # Dirty way of attacking the enemy
        lmb, mmb, rmb = pygame.mouse.get_pressed()
        mouse_x, mouse_y = pygame.mouse.get_pos()
        if lmb:
            e = world.get_close_entity('zombie', Vector2(mouse_x, mouse_y), radius=32)
            if e is not None:
                print('zombie found @ {}; state: {}'.format(e.location, e.brain.active_state.name))
                e.hp -= 1

        world.process(time_passed)
        screen.fill(BLACK)
        world.render(screen)
        pygame.display.update()
        time_passed = clock.tick(FPS)


def quit_game():
    """Shut pygame down and exit the process."""
    pygame.quit()
    sys.exit()


def setup_world(world):
    """Populate the world with zombies, survivors, and a sentry gun."""
    zombie_surf = image_dude['zombie.png']
    for i in range(20):
        z_width, z_height = zombie_surf.get_size()
        # BUG FIX: random.randint() requires integer bounds — passing the
        # float results of '/' is a TypeError on Python 3.12+ (deprecated
        # since 3.10). Use floor division.
        randx = randint(z_width // 2, SCREEN_WIDTH - z_width // 2)
        randy = randint(z_height // 2, SCREEN_HEIGHT - z_height // 2)
        z_location = Vector2(randx, randy)
        zombie = mobs.Zombie(world, zombie_surf, z_location)
        world.add_entity(zombie)

    survivor_surf = pygame.Surface((32, 32)).convert()
    survivor_surf.fill(GREEN)
    for i in range(5):
        s_width, s_height = survivor_surf.get_size()
        # Same integer-bounds fix as above.
        randx = randint(s_width // 2, SCREEN_WIDTH - s_width // 2)
        randy = randint(s_height // 2, SCREEN_HEIGHT - s_height // 2)
        s_location = Vector2(randx, randy)
        survivor = mobs.Survivor(world, survivor_surf, s_location)
        world.add_entity(survivor)

    sentry_gun_surf = image_dude['sentrygun.png']
    for i in range(1, 2):
        x, y = (SCREEN_WIDTH * i / 3, SCREEN_HEIGHT / 2)
        sentry_gun = mobs.SentryGun(world, sentry_gun_surf, Vector2(x, y))
        world.add_entity(sentry_gun)

    for e in world.entities.values():
        print(e)


if __name__ == '__main__':
    main()
/tests/test_effects.py
"""Unit tests for the visual effects entities (bullet trails and explosions)."""
import copy
from effects import BulletTravelEffect, ExplosionEffect
from game import World
import unittest
from pygame.math import Vector2
from game import *

TICK_SECOND = 1000 / 30 / 1000  # One tick represented by 30 frames per second; 33 milliseconds


class BulletTravelEffectTestCase(unittest.TestCase):
    """Exercises BulletTravelEffect: a moving head/tail line effect."""

    def setUp(self):
        # A single bullet effect travelling corner-to-corner across the screen.
        self.world = World()
        '''
        self.origin = Vector2(0, SCREEN_HEIGHT)
        self.destination = Vector2(SCREEN_WIDTH, 0)
        self.bullet_effect = BulletTravelEffect(self.world, self.origin, self.destination)
        '''
        self.origin = Vector2(0, SCREEN_HEIGHT)
        self.destination = Vector2(SCREEN_WIDTH, 0)
        self.color = YELLOW
        #self.duration = 1 / 10  # 1/10th of a second
        self.bullet = BulletTravelEffect(self.world, self.origin, self.destination, color=self.color)
        self.world.add_entity(self.bullet)

    def test_instance(self):
        """Constructor arguments should be stored verbatim on the effect."""
        origin = Vector2(0, SCREEN_HEIGHT)
        destination = Vector2(SCREEN_WIDTH, 0)
        color = YELLOW
        duration = 1/10  # 1/10th of a second
        bullet = BulletTravelEffect(self.world, origin, destination, color=color, duration=duration)
        self.assertEqual(bullet.location, origin)
        self.assertEqual(bullet.destination, destination)
        self.assertEqual(bullet.color, color)
        self.assertEqual(bullet.remaining_time, duration)

    def test_location_destination(self):
        pass

    def test_fade(self):
        """remaining_time should decrease by exactly the processed tick."""
        d = 1
        self.bullet.DURATION = d
        self.bullet.remaining_time = d  # seconds
        # Test when the bullet trail/line starts to fade
        self.bullet.process(TICK_SECOND)
        self.assertLess(self.bullet.remaining_time, self.bullet.DURATION)
        self.assertEqual(self.bullet.remaining_time, self.bullet.DURATION - TICK_SECOND)

    def test_remaining_zero(self):
        """Processing with no remaining time should remove the effect."""
        # Kill the effect
        self.bullet.remaining_time = 0
        self.bullet.process(TICK_SECOND)
        self.assertNotIn(self.bullet, self.world.entities.values())

    def test_bullet_travel(self):
        """Test the bullet_head and bullet_tail vectors"""
        self.assertEqual(self.bullet.fx_head, self.bullet.location)
        self.assertEqual(self.bullet.fx_tail, self.bullet.location)
        #self.assertEqual(self.bullet.fx_length, 100)
        heading = (self.bullet.destination - self.bullet.location).normalize()
        self.assertEqual(self.bullet.fx_heading, heading)
        # Do one TICK; the head should start moving, while the tail remains the same
        self.bullet.process(TICK_SECOND)
        travelled = (TICK_SECOND * self.bullet.fx_speed)
        self.assertEqual(self.bullet.fx_head.distance_to(self.bullet.location), travelled)
        self.assertEqual(self.bullet.fx_tail, self.bullet.location)

    def test_process_head(self):
        """fx_head should advance along the heading by speed * elapsed time."""
        num_ticks = 1000
        ticks = list((TICK_SECOND for i in range(num_ticks)))
        tick_accumulate = 0
        expected_head = {}
        b = self.bullet
        # build expected head; assumptions of fx_head's whereabouts relative to tick_accumulate
        for tick in ticks:
            heading = (b.destination - b.location).normalize()
            new_location = b.fx_head + (heading * (tick_accumulate + tick) * b.speed)
            # ^ accumulate current tick since it is leading tail
            expected_head[tick_accumulate] = new_location
            tick_accumulate += tick
        tick_accumulate = 0
        for i, tick in enumerate(ticks):
            if b not in self.world.entities.values():
                # bullet is no longer in this world... but still exists as object;
                # eg. b's fx_head == fx_tail == fx_destination
                break
            with self.subTest(tick_accumulate=tick_accumulate, i=i):
                b.process(tick)
                expected = expected_head[tick_accumulate]
                if b.fx_head != b.destination:
                    self.assertEqual(expected, b.fx_head)
                tick_accumulate += tick

    def test_location(self):
        """A fresh effect has head == tail == origin, and lives in the world."""
        b = self.bullet
        self.assertEqual(b.fx_tail, b.location)
        self.assertEqual(b.fx_head, b.location)
        self.assertNotEqual(b.fx_head, b.destination)
        self.assertNotEqual(b.fx_tail, b.destination)
        self.assertIn(b, self.world.entities.values())

    def test_process_tail(self):
        """fx_tail should trail behind, following the head towards destination."""
        self.assertIsNotNone(self.bullet)
        num_ticks = 1000
        ticks = list((TICK_SECOND for i in range(num_ticks)))
        tick_accumulate = 0
        expected_head = {}
        expected_tail = {}
        b = self.bullet
        self.assertIn(TICK_SECOND, ticks)
        self.assertEqual(num_ticks, len(ticks))
        # build expected tail; assumptions of fx_tail's whereabouts relative to tick_accumulate
        for tick in ticks:
            tail_heading = (b.destination - b.fx_tail).normalize()
            new_tail_location = b.fx_tail + (tail_heading * tick_accumulate * b.speed)
            expected_tail[tick_accumulate] = new_tail_location
            tick_accumulate += tick
        # head and tail must be distinct vector objects, not aliases
        self.assertNotEqual(id(b.fx_tail), id(b.fx_head))
        tick_accumulate = 0
        for i, tick in enumerate(ticks):
            if b not in self.world.entities.values():
                break
            with self.subTest(tick_accumulate=tick_accumulate, i=i):
                b.process(tick)
                #print(expected_tail[tick_accumulate], b.fx_tail, sep='=')
                self.assertEqual(expected_tail[tick_accumulate], b.fx_tail)
                tick_accumulate += tick

    @unittest.skip
    def test_each_tick(self):
        # There's a bug here, where the length is far less than fx_length,
        # relative to a single tick and its speed... But visually, it's not a big problem.
        num_ticks = 100
        ticks = list((TICK_SECOND for i in range(num_ticks)))
        b = self.bullet
        tick_accumulate = 0
        for tick in ticks:
            b.process(tick)
            with self.subTest(tick_accumulate=tick_accumulate):
                if b.fx_head != b.destination and b.fx_tail != b.destination and \
                        b.fx_tail != b.location and b.fx_head != b.location:
                    self.assertAlmostEqual(b.fx_head.distance_to(b.fx_tail), b.fx_length, 1)
                tick_accumulate += tick

    def test_die(self):
        """Effect should die when both fx_head/tail reaches destination"""
        self.bullet.fx_head = self.bullet.destination
        self.bullet.fx_tail = self.bullet.fx_head
        self.bullet.process(TICK_SECOND)
        self.assertNotIn(self.bullet, self.world.entities.values())


class ExplosionEffectTestCase(unittest.TestCase):
    """Exercises ExplosionEffect: a shrinking circle that dies at radius 0."""

    def setUp(self):
        self.exp_radius = 50
        self.exp_duration = 1  # second
        self.world = World()
        self.exp_location = Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
        #self.exp_image = pygame.Surface((32, 32)).fill(RED)
        self.exp_color = RED
        self.explosion = ExplosionEffect(self.world, self.exp_location, self.exp_radius, self.exp_color)
        self.world.add_entity(self.explosion)

    def test_instantiate_radius(self):
        """Constructor must reject a negative radius."""
        # Negative radius
        with self.assertRaises(ValueError):
            ExplosionEffect(self.world, self.exp_location, -1)

    def test_instantiate_color(self):
        """Constructor must validate the color argument's type and length."""
        # Color argument type
        with self.assertRaises(TypeError):
            ExplosionEffect(self.world, self.exp_location, self.exp_radius, color=1)
        # Color argument length
        with self.assertRaises(ValueError):
            ExplosionEffect(self.world, self.exp_location, self.exp_radius, color=(100, 200))

    def test_die_radius_zero(self):
        """An explosion whose radius hit zero is removed on the next tick."""
        self.explosion.radius = 0
        self.explosion.process(TICK_SECOND)
        self.assertNotIn(self.explosion, self.world.entities.values())

    def test_radius_shrink(self):
        """Explosion should shrink based on TICK"""
        old_radius = self.explosion.radius
        self.explosion.process(TICK_SECOND)
        self.assertLess(self.explosion.radius, old_radius)
        # num_ticks = 0
        # while self.explosion.radius >= 0:
        #     self.explosion.process(TICK_SECOND)
        #     print('radius:', self.explosion.radius)
        #     num_ticks += 1
        # print(num_ticks)
/tests/test_entity.py
"""Unit tests for the entity module: facing/angles, boundary rects, sides."""
import unittest
from pygame.math import Vector2
from game import *
from entity import *


class GameEntityTestCase(unittest.TestCase):
    """Tests entity rotation helpers (face_entity/face_vector/get_angle)."""

    def setUp(self):
        # Two entities: A at screen centre, B towards the bottom-right.
        self.world = World()
        self.ENTITY_WIDTH, self.ENTITY_HEIGHT = self.ENTITY_SIZE = (32, 32)
        self.entity_image = pygame.Surface(self.ENTITY_SIZE)
        x = SCREEN_WIDTH / 2
        y = SCREEN_HEIGHT / 2
        self.entityA = SentientEntity(self.world, 'dummy', self.entity_image, location=Vector2(x, y))
        x = SCREEN_WIDTH * 3 / 4
        y = SCREEN_HEIGHT * 3 / 4
        self.entityB = SentientEntity(self.world, 'dummy', self.entity_image, location=Vector2(x, y))

    def test_face_entity(self):
        """face_entity must rotate A towards B and return the new angle."""
        rotation_a = self.entityA.face_entity(self.entityB)
        # Manually calculate rotation (y negated: screen y grows downwards)
        vec_diff = self.entityB.location - self.entityA.location
        angle = utilities.unit_angle(-math.atan2(vec_diff.y, vec_diff.x))
        self.assertAlmostEqual(angle, rotation_a, 4)
        self.assertAlmostEqual(angle, self.entityA.angle, 4)

    def test_face_vector(self):
        """face_vector must behave exactly like face_entity given a location."""
        # Do face_vector version:
        rotation_a = self.entityA.face_vector(self.entityB.location)
        # Manually calculate rotation
        vec_diff = self.entityB.location - self.entityA.location
        angle = utilities.unit_angle(-math.atan2(vec_diff.y, vec_diff.x))
        self.assertAlmostEqual(angle, rotation_a, 4)
        self.assertAlmostEqual(angle, self.entityA.angle, 4)

    def test_get_angle(self):
        """Static get_angle must agree with the manual atan2 computation."""
        angle = SentientEntity.get_angle(self.entityA.location, self.entityB.location)
        # Manually calculate angle
        vec_diff = self.entityB.location - self.entityA.location
        calc_angle = utilities.unit_angle(-math.atan2(vec_diff.y, vec_diff.x))
        self.assertAlmostEqual(calc_angle, angle, 4)


class GameEntityBoundaryRectTestCase(unittest.TestCase):
    """Tests set_rect/get_rect: the collision rect independent of the sprite."""

    def setUp(self):
        self.dummy_surf = pygame.Surface((32, 32))
        self.location = Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
        self.world = World()
        self.entity = GameEntity(self.world, 'dummy', self.dummy_surf, self.location)
        self.world.add_entity(self.entity)
        # What we're interested in:
        self.rect_width = 16  # surface may have 32px width, but entity should really be 16px when performing things
        self.rect_height = 32
        self.boundary_rect = pygame.Rect((0, 0), (self.rect_width, self.rect_height))  # note: x/y don't matter
        self.boundary_rect_offset = Vector2(-self.rect_width / 2, -self.rect_height)  # Offset from entity.location

    def test_set_boundary_rect(self):
        """set_rect stores width/height on the name-mangled private rect."""
        self.entity.set_rect(self.boundary_rect)
        # Should ignore rect x and y...
        self.assertEqual(self.entity._GameEntity__rect.width, self.boundary_rect.width)
        self.assertEqual(self.entity._GameEntity__rect.height, self.boundary_rect.height)

    def test_set_boundary_rect_with_offset(self):
        """set_rect also stores an optional offset from the entity location."""
        self.entity.set_rect(self.boundary_rect, self.boundary_rect_offset)
        # Should ignore rect x and y...
        self.assertEqual(self.entity._GameEntity__rect, self.boundary_rect)
        self.assertEqual(self.entity._GameEntity__rect_offset, self.boundary_rect_offset)

    def test_get_boundary_rect(self):
        """get_rect returns the stored rect centred on the entity location."""
        self.entity.set_rect(self.boundary_rect)
        rect = self.entity.get_rect()
        self.assertEqual(self.entity._GameEntity__rect.width, rect.width)
        self.assertEqual(self.entity._GameEntity__rect.height, rect.height)
        # Because there is no offset, the rect will be centered to location
        self.assertEqual(rect.x, self.entity.location.x - rect.width / 2)
        self.assertEqual(rect.y, self.entity.location.y - rect.height / 2)

    def test_get_boundary_rect_with_offsets(self):
        """The stored offset shifts the centred rect."""
        self.entity.set_rect(self.boundary_rect, self.boundary_rect_offset)
        rect = self.entity.get_rect()
        loc = self.entity.location
        brect = self.boundary_rect
        self.assertEqual(rect.x, loc.x - brect.width / 2 + self.boundary_rect_offset.x)
        self.assertEqual(rect.y, loc.y - brect.height / 2 + self.boundary_rect_offset.y)

    def test_get_boundary_rect_no_rect_height_width_only(self):
        """Test the get_rect() method to return the entity's image rect instead
        of rect when there is none assigned. This test will not concern the
        entity's rectangle's X/Y coordinates."""
        rect = self.entity.get_rect()
        image_rect = self.entity.image.get_rect()
        self.assertEqual(rect.width, image_rect.width)
        self.assertEqual(rect.height, image_rect.height)

    def test_get_boundary_rect_no_rect(self):
        """Continuation of above, but considers x and y attributes"""
        rect = self.entity.get_rect()
        image_rect = self.entity.image.get_rect()
        self.assertEqual(rect.x, self.location.x - image_rect.width / 2)
        self.assertEqual(rect.y, self.location.y - image_rect.height / 2)


class SentientEntitySidesTestCase(unittest.TestCase):
    """Tests enemy detection (get_close_enemy) across opposing sides."""

    def setUp(self):
        self.world = World()
        self.good_guy_name = 'good_guy'
        self.bad_guy_name = 'bad_fuy'
        self.other_bad_guy_name = 'bad_man'
        # good_guy considers both bad guys enemies; they each consider him one.
        self.good_guy = SentientEntity(self.world, self.good_guy_name, None, Vector2(100, 100), speed=0,
                                       enemies=[self.bad_guy_name, self.other_bad_guy_name])
        self.bad_guy = SentientEntity(self.world, self.bad_guy_name, None, Vector2(150, 140), speed=0,
                                      enemies=[self.good_guy_name])
        self.bad_guy2 = SentientEntity(self.world, self.other_bad_guy_name, None,
                                       Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2), speed=0,
                                       enemies=[self.good_guy_name])
        self.world.add_entity(self.good_guy)
        self.world.add_entity(self.bad_guy)
        self.world.add_entity(self.bad_guy2)

    def test_get_enemy_entity(self):
        """An enemy within the radius should be found."""
        enemy = self.good_guy.get_close_enemy(radius=100)
        self.assertIsNotNone(enemy)
        self.assertIn(enemy.name, self.good_guy.enemies)

    def test_get_enemy_entity_other_bad_guy(self):
        """Any enemy name qualifies, not just the first in the enemies list."""
        # Replace other bad guy's location with first bad guys', and put the first far away
        temp_loc = self.bad_guy.location
        self.bad_guy.location = Vector2(*SCREEN_SIZE)
        self.bad_guy2.location = temp_loc
        enemy = self.good_guy.get_close_enemy(radius=100)
        self.assertIsNotNone(enemy)
        self.assertIn(enemy.name, self.good_guy.enemies)
        self.assertEqual(enemy.name, self.other_bad_guy_name)

    def test_get_enemy_entity_beyond_radius(self):
        """No enemy should be returned when all of them are out of range."""
        self.good_guy.location = (0, 0)
        self.bad_guy.location = Vector2(*SCREEN_SIZE)
        self.bad_guy2.location = Vector2(*SCREEN_SIZE)
        enemy = self.good_guy.get_close_enemy(radius=100)
        self.assertIsNone(enemy)
/tests/test_image_manager.py
"""Unit tests for manager.ImageManager, a dict-like image loader/cache."""
import unittest
from manager import ImageManager
import pygame
import os
from errors import *


class ImageManagerTestCaseA(unittest.TestCase):
    """Construction before pygame's display exists must fail."""

    def test_try_making_imagemanager(self):
        """ImageManager should raise an error if the screen surface has not been initialised yet"""
        with self.assertRaises(ScreenNotInitialized):
            imagemanager = ImageManager()


class ImageManagerTestCaseB(unittest.TestCase):
    """Behaviour of a properly-initialised ImageManager."""

    def setUp(self):
        # Initialise required stuff
        pygame.init()
        self.screen = pygame.display.set_mode((640, 480))
        self.path = '../data/images/'
        self.imagedude = ImageManager(self.path)
        # Load images from data/images/
        self.bg = pygame.image.load(os.path.join(self.path, 'backgroundA.jpg')).convert()  # Load image
        self.bg_width, self.bg_height = self.bg.get_size()
        self.imagedude['backgroundB.jpg'] = self.bg  # Add image

    def test_try_making_imagemanager(self):
        """ImageManager should raise an error if the screen surface has not been initialised yet"""
        pygame.quit()
        pygame.init()  # init() alone does not recreate the display surface
        with self.assertRaises(ScreenNotInitialized):
            imagemanager = ImageManager()

    def test_add_image_invalid_value(self):
        """Only pygame Surfaces may be stored as values."""
        with self.assertRaises(TypeError):
            self.imagedude['abc'] = '123'
            self.imagedude['edf'] = 123

    def test_add_images(self):
        # Add image
        image_name = 'bg'
        self.imagedude[image_name] = self.bg
        self.assertEqual(self.imagedude[image_name], self.bg)

    def test_get_image(self):
        """A previously-stored surface is returned unchanged."""
        bg = self.imagedude['backgroundB.jpg']
        self.assertEqual(bg, self.bg)

    @unittest.skip
    def test_get_image_invalid_type(self):
        with self.assertRaises(TypeError):
            surf = self.imagedude[123123]

    def test_get_image_not_found(self):
        """Requesting a file that does not exist on disk must raise."""
        with self.assertRaises(FileNotFoundError):
            surf = self.imagedude['filenotfoundimage.png']

    def test_automatic_load_image(self):
        """Load an image that has not been loaded before"""
        # Make sure that the requested surface is not none
        background = self.imagedude['backgroundA.jpg']
        self.assertIsNotNone(background)
        # Test that the image was actually stored into the dictionary
        self.assertEqual(background, self.imagedude['backgroundA.jpg'])
        # Compare the dimensions of the loaded images
        bgB = pygame.image.load(os.path.join(self.path, 'backgroundA.jpg')).convert()
        background_size = background.get_size()
        bgB_size = bgB.get_size()
        self.assertEqual(background_size, bgB_size)
        # Test loading image that doesn't exist.
        with self.assertRaises(FileNotFoundError):
            image = self.imagedude['asdflkjoiuqeioqwe.jog']
        # Make sure that loading images with invalid image filename types is illegal
        with self.assertRaises(TypeError):
            invalid = self.imagedude[123456]
            invalid = self.imagedude[123456.3]

    def test_transparent_image(self):
        # Test loading an image with alpha
        transparent_image = self.imagedude['transparent.png']
        pixel = transparent_image.get_at((10, 10))
        self.assertNotEqual(pixel, (0, 0, 0))
        self.assertNotEqual(pixel, (255, 255, 255))
        self.assertEqual(transparent_image.get_at((70, 70)), (0, 0, 0))  # BLACK
        self.assertEqual(transparent_image.get_at((35, 70)), (149, 0, 186))  # Arbitrary purple

    def test_pre_cache_all(self):
        pass

    def test_directory(self):
        """image_directory must be the absolute form of the configured path."""
        imagedude_path = self.imagedude.image_directory
        #print(imagedude_path)
        self.assertEqual(imagedude_path, os.path.abspath(self.path))
        all_filesA = tuple((entry.name for entry in os.scandir(imagedude_path)))
        all_filesB = tuple((entry.name for entry in os.scandir(self.path)))
        self.assertTupleEqual(all_filesA, all_filesB)


if __name__ == '__main__':
    unittest.main()
/tests/test_mobs.py
"""Unit tests for the SentryGun mob: targeting, angles and state machine."""
import unittest
from manager import ImageManager
import time
from mobs import *
from game import *
from pygame.math import Vector2


class SentryGunTestCase(unittest.TestCase):
    """A sentry gun at screen centre and one zombie it can acquire."""

    def setUp(self):
        pygame.init()
        self.screen = pygame.display.set_mode(SCREEN_SIZE)
        self.image_manager = ImageManager('../data/images/')
        self.sentry_gun_image = self.image_manager['sentrygun.png']
        self.world = World()
        self.TICK_SECOND = 33 / 1000  # ~30 FPS frame time, in seconds
        # Create the sentry gun
        x = SCREEN_WIDTH / 2
        y = SCREEN_HEIGHT / 2
        self.sentry_gun = SentryGun(self.world, self.sentry_gun_image, (x, y))
        self.world.add_entity(self.sentry_gun)
        # Add a couple of zombies
        '''
        for i in range(10):
            zombie_image = self.image_manager['zombie.png']
            zombie = Zombie(self.world, zombie_image, (randint(0, SCREEN_WIDTH), randint(0, SCREEN_HEIGHT)))
            self.world.add_entity(zombie)
        '''
        # Main zombie
        self.zombie = Zombie(self.world, self.image_manager['zombie.png'], (100, 100))
        self.world.add_entity(self.zombie)
        self.world.render(self.screen)
        pygame.display.update()

    def test_turret_face_target(self):
        """After facing the zombie, one think() should lock it as target."""
        self.sentry_gun.turret_face_entity(self.zombie)
        self.sentry_gun.brain.think()
        self.assertEqual(self.sentry_gun.target, self.zombie)

    def test_target_acquire(self):
        """Pointing the turret angle at the zombie acquires it as target."""
        # Make the turret face the zombie
        angle = SentientEntity.get_angle(self.sentry_gun.location, self.zombie.location)
        self.sentry_gun.turret_angle = angle
        self.sentry_gun.brain.think()  # Switch states from scan to face
        print(self.sentry_gun.brain.active_state.name)
        self.assertEqual(self.sentry_gun.target, self.zombie)

    @unittest.skip
    def test_rotate_to_target(self):
        self.sentry_gun.target = self.zombie
        self.sentry_gun.brain.set_state('face')
        # Do a loop that will repeatedly call think
        '''
        prev_angle = self.sentry_gun.turret_angle
        for i in range(100):
            self.screen.fill((0, 0, 0))
            #with self.subTest(i=i):
            self.sentry_gun.process(self.TICK_SECOND)
            #self.assertNotEqual(self.sentry_gun.turret_angle, prev_angle)
            print('angle:', self.sentry_gun.turret_angle)
            #angle_diff = self.sentry_gun.turret_angle - prev_angle
            #self.assertAlmostEqual(angle_diff, self.sentry_gun.turret_rotation_rate * self.TICK_SECOND, 4)
            prev_angle = self.sentry_gun.turret_angle
            self.world.render(self.screen)
            pygame.display.update()
        '''

    def test_turret_angle(self):
        """turret_angle setter should normalize values via unit_angle."""
        self.assertAlmostEqual(self.sentry_gun.turret_angle, utilities.unit_angle(self.sentry_gun.turret_angle))
        new_angle = 100
        self.sentry_gun.turret_angle = new_angle
        angle = self.sentry_gun.turret_angle
        self.assertEqual(angle, utilities.unit_angle(new_angle))

    def test_entity_angle(self):
        """Base entity angle setter should normalize values the same way."""
        self.assertAlmostEqual(self.sentry_gun.angle, utilities.unit_angle(self.sentry_gun.angle))
        new_angle = 100
        self.sentry_gun.angle = new_angle
        angle = self.sentry_gun.angle
        self.assertEqual(angle, utilities.unit_angle(new_angle))

    def test_attack_target(self):
        """Full cycle: acquire, kill target, fall back to scanning."""
        #self.sentry_gun.face_entity(self.zombie)
        self.sentry_gun.turret_angle = SentientEntity.get_angle(self.sentry_gun.location, self.zombie.location)
        for i in range(10):
            self.sentry_gun.brain.think()
        current_state_name = self.sentry_gun.brain.active_state.name
        #self.assertEqual(current_state_name, 'attack')
        self.assertEqual(self.sentry_gun.target, self.zombie)
        # Kill target and check if it returns to scan mode
        self.zombie.hp -= 10000
        self.sentry_gun.brain.think()
        current_state_name = self.sentry_gun.brain.active_state.name
        self.assertEqual(current_state_name, 'scan')
        self.assertIsNone(self.sentry_gun.target)  # it should no longer target dead zombie
        self.sentry_gun.target = None
        # Move the zombie somewhere it cannot be seen by the turret
        self.zombie.hp = 10
        x = self.sentry_gun.location.x + 100
        y = self.sentry_gun.location.y + 100
        self.zombie.location = Vector2(x, y)
        for i in range(10):
            self.sentry_gun.brain.think()
        self.assertIsNone(self.sentry_gun.target)  # No target since zombie is behind turret
        current_state_name = self.sentry_gun.brain.active_state.name
        self.assertEqual(current_state_name, 'scan')
/tests/test_projectile.py
"""Unit tests for weapon.Projectile construction and lifetime."""
from weapon import Weapon, Projectile, Warhead
from unittest import TestCase
from game import *
import utilities


class DestinationProjectileTestCase(TestCase):
    """Projectile built from an origin/destination pair."""

    def setUp(self):
        self.warhead = None
        self.speed = 100
        self.world = World()
        self.location = Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
        self.destination = Vector2(SCREEN_WIDTH, SCREEN_HEIGHT)
        # NOTE(review): these positional arguments do not line up with
        # weapon.Projectile.__init__(world, name, image, location,
        # direction_vec, ...) -- this call looks out of date with the current
        # Projectile signature; verify before relying on this test case.
        self.projectile = Projectile(self.world, None, self.location, self.destination, self.speed, self.warhead)
        self.world.add_entity(self.projectile)

    def test_instance(self):
        pass


class AngledProjectileTestCase(TestCase):
    """Projectile built from a direction vector derived from an angle."""

    def setUp(self):
        self.speed = 100
        self.world = World()
        self.location = Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
        self.angle = utilities.unit_angle(math.radians(300))
        self.direction = Vector2(1, 0).rotate(self.angle)
        self.max_distance = 200
        self.projectile = Projectile(self.world, 'bullet', None, self.location, self.direction, speed=self.speed,
                                     damage=0, max_distance=self.max_distance)
        self.world.add_entity(self.projectile)

    def test_instance(self):
        pass

    def test_max_distance_remove_from_world(self):
        """Processing enough time to cover max_distance removes the bullet."""
        seconds = self.max_distance / self.speed
        self.projectile.process(seconds)
        self.assertNotIn(self.projectile, self.world.entities.values())
/tests/test_utilities.py
"""Tests for utilities.unit_angle (radian normalization to the unit circle)."""
import math
import unittest
import utilities


class UtilitiesTestCase(unittest.TestCase):
    def test_unit_circle_angle(self):
        """Normalizing any angle must keep it in [0, 2*pi] and preserve its sine."""
        hypotenuse = 5
        test_angles = list(range(-20, 20))
        # Opposite side lengths computed from the raw, un-normalized angles.
        expected_opposites = {a: hypotenuse * math.sin(a) for a in test_angles}
        for raw_angle in test_angles:
            with self.subTest(angle=raw_angle):
                normalized = utilities.unit_angle(raw_angle)
                # The result stays within a single revolution...
                self.assertLessEqual(normalized, math.pi * 2)
                self.assertGreaterEqual(normalized, 0)
                # ...and is trigonometrically equivalent to the input angle.
                actual_opposite = hypotenuse * math.sin(normalized)
                self.assertAlmostEqual(expected_opposites[raw_angle], actual_opposite, 10)

    def test_unit_circle_angle_bounds(self):
        """The boundary angles 0 and 2*pi both normalize to cos==1, sin==0."""
        hypotenuse = 10
        for boundary in (0, math.pi * 2):
            with self.subTest(angle=boundary):
                normalized = utilities.unit_angle(boundary)
                self.assertAlmostEqual(hypotenuse * math.cos(normalized), hypotenuse)
                self.assertAlmostEqual(hypotenuse * math.sin(normalized), 0)
/tests/test_warhead.py
"""Unit tests for weapon.Warhead."""
from weapon import Weapon, Projectile, Warhead
import unittest
from game import *


class WarheadTestCase(unittest.TestCase):
    """Warheads should be reusable for different projectiles of same type"""

    def setUp(self):
        """
        self.warhead = None
        self.speed = 100
        self.world = World()
        self.location = Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
        self.projectile = Projectile(self.world, None, self.location, self.destination, self.speed, self.warhead)
        self.world.add_entity(self.projectile)
        """

    def test_instance(self):
        pass


# FIX: this second class was also named ``WarheadTestCase``, which silently
# shadowed the class above so its tests never ran.  Renamed so both test
# cases stay visible to the unittest loader.
class WarheadAttributesTestCase(unittest.TestCase):
    """Sketch of the attributes a Warhead instance is expected to carry."""

    def setUp(self):
        self.damage = 0
        self.vs_armor = 0.5
        self.vs_flesh = 1
        self.weapon = None  # If there is one, the weapon will fire too
        self.radius = 0
        self.attached_effect = None
/tests/test_weapon.py
"""Unit tests for weapon.WeaponSimplified (fire delay, ammo, bullet spawn)."""
import unittest
import utilities
from weapon import *
from game import *


class WeaponSimplifiedTestCase(unittest.TestCase):
    """Weapon owned by a dummy entity at the screen centre."""

    def setUp(self):
        self.fire_rate = 3  # bullets per second
        self.world = World()
        self.owner_location = Vector2(SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
        self.owner = GameEntity(self.world, 'dummy', None, self.owner_location)
        self.ammo = 9999
        self.damage = 10
        self.weapon = WeaponSimplified(self.world, self.owner, self.fire_rate, self.damage, self.ammo)

    def test_ammunition_decrease_1tick(self):
        """A single successful fire() consumes exactly one round."""
        self.weapon.process(TICK_SECOND)
        self.weapon.fire()
        self.assertEqual(self.weapon.ammo, self.ammo - 1)

    # def test_ammunition_decrease_2sec(self):
    #     seconds = 2
    #     self.weapon.process(seconds)
    #     self.assertEqual(self.weapon.ammo, self.ammo - self.fire_rate * seconds)

    def test_after_2seconds_ready_to_fire(self):
        """Firing blocks the weapon until enough time has been processed."""
        self.weapon.fire()
        self.assertFalse(self.weapon.ready_to_fire)
        self.weapon.process(2)
        self.weapon.ready_to_fire = True
        pass

    def test_bullets_spawned_on_fire(self):
        """fire() must add a projectile entity to the world."""
        self.weapon.process(1)
        self.weapon.fire()
        self.assertGreater(self.world.entity_count(), 0)

    def test_bullets_damage(self):
        """Every spawned bullet carries the weapon's damage value."""
        self.weapon.process(1)
        bullets = (e for e in self.world.entities.values() if e.name == 'bullet')
        for b in bullets:
            with self.subTest(bullet=b):
                self.assertEqual(b.damage, self.weapon.damage)

    def test_no_ammo(self):
        """With zero ammo, fire() is a no-op and the accumulator stays reset."""
        self.weapon.ammo = 0
        self.weapon.process(TICK_SECOND)
        self.weapon.fire()
        self.assertEqual(self.weapon.ammo, 0)
        self.assertEqual(self.weapon.accumulator, 0)  # accumulator = 0, since there is no more ammo
/tests/test_world.py
"""Unit tests for game.World entity lookups over a randomly-populated world."""
from mobs import *
from random import randint, random
import unittest
from game import *
import pygame

# Expected population counts, asserted against World queries below.
NUM_ZOMBIES = 10
NUM_SURVIVORS = 5
NUM_SENTRY_GUNS = 2


class WorldTestCase(unittest.TestCase):
    def setUp(self):
        # A world populated with randomly-placed mobs sharing one dummy sprite.
        self.world = World()
        dummy_surface = pygame.Surface((16, 16))
        w, h = dummy_surface.get_size()
        # Add zombies
        for i in range(NUM_ZOMBIES):
            x = random() * SCREEN_WIDTH
            y = random() * SCREEN_HEIGHT
            zombie = Zombie(self.world, dummy_surface, Vector2(x, y))
            self.world.add_entity(zombie)
        # Add survivors
        for i in range(NUM_SURVIVORS):
            x = random() * SCREEN_WIDTH
            y = random() * SCREEN_HEIGHT
            survivor = Survivor(self.world, dummy_surface, Vector2(x, y))
            self.world.add_entity(survivor)
        # Add sentry guns
        for i in range(NUM_SENTRY_GUNS):
            x = random() * SCREEN_WIDTH
            y = random() * SCREEN_HEIGHT
            self.sentry_gun = SentryGun(self.world, dummy_surface, Vector2(x, y))
            self.world.add_entity(self.sentry_gun)

    def test_list_all_entities_with_name(self):
        """entities_with_name must yield exactly the entities added per name."""
        zombies = tuple(self.world.entities_with_name('zombie'))
        survivors = tuple(self.world.entities_with_name('survivor'))
        sentry_guns = tuple(self.world.entities_with_name('sentry_gun'))
        self.assertEqual(len(zombies), NUM_ZOMBIES)
        self.assertEqual(len(survivors), NUM_SURVIVORS)
        self.assertEqual(len(sentry_guns), NUM_SENTRY_GUNS)

    def test_get_close_entity_type_zombie(self):
        """get_close_entity filtered by name must return that kind of entity."""
        z = self.world.get_close_entity('zombie', SCREEN_CENTER)
        self.assertEqual(z.name, 'zombie')
/utilities.py
"""Angle helpers shared across the game."""
import math

# One full revolution in radians.
_TWO_PI = math.pi * 2


def unit_angle(angle):
    """Normalize *angle* (radians) into the unit-circle range [0, 2*pi).

    Python's ``%`` operator already returns a result with the sign of the
    divisor, so a single modulo handles negative inputs too -- the previous
    ``divmod`` + "add one revolution if negative" implementation was
    equivalent, with the sign-fixup branch being dead code.
    """
    return angle % _TWO_PI
/weapon.py
"""Weapons and projectiles: the (unfinished) Weapon class, Projectile entities
and the WeaponSimplified implementation actually used by the game."""
from random import *
from entity import *
from game import *

'''
self.pistol = Weapon(self.weap_damage, \
                     self.weap_clip, \
                     self.weap_reload_rate, \
                     self.weap_fire_rate, \
                     self.weap_spread, \
                     self.weap_rounds_per_shot, \
                     self.weap_projectile_type, \
                     self.weap_projectile_count)
'''


class Weapon:
    """Full-featured weapon model (clip, reload, spread).

    NOTE(review): appears unfinished -- ``process`` calls ``self.is_ready()``
    which is never defined on this class, so calling process() when the clip
    is non-empty would raise AttributeError.  WeaponSimplified below is the
    version the rest of the code exercises.
    """

    def __init__(self, damage=1, clip=1, max_ammo=90, reload_rate=1, fire_rate=1, spread=0, rounds_per_shot=1,
                 proj_type=None, num_proj=1, proj_speed=100, warhead=None, factory=None, reload_entire_clip=True,
                 projectile_factory=None):
        # An optional factory callback may pre-configure this instance.
        if factory is not None:
            factory(self)
        # UPPER_CASE attributes are treated as per-instance constants.
        self.DAMAGE = damage
        self.clip = clip                      # current rounds in the clip
        self.MAX_CLIP = clip
        self.MAX_AMMO = max_ammo
        self.RELOAD_RATE = reload_rate        # rounds restored per second
        self.FIRE_RATE = fire_rate
        self.SPREAD = spread
        self.ROUNDS_PER_SHOT = rounds_per_shot
        self.PROJECTILE_TYPE = proj_type
        self.NUM_PROJECTILES = num_proj
        self.PROJECTILE_SPEED = proj_speed
        self.WARHEAD = warhead
        self.ready = True
        self.reload_entire_clip = reload_entire_clip

    def shoot_angled(self, world, angle):
        """Shoot the projectiles at an angle, and add them into the world"""
        pass

    def process(self, seconds_passed):
        # Reload whenever the clip runs dry.
        if self.clip == 0:
            # reload
            self.reload(seconds_passed)
        # NOTE(review): is_ready() is not defined anywhere on this class.
        if self.is_ready():
            pass

    def reload(self, seconds_passed):
        # Gradual reload; clamping to MAX_CLIP was never finished.
        self.clip += self.RELOAD_RATE * seconds_passed
        # if self.clip > self.MAX_CLIP:


class ProjectileFactory:
    """Class that gives a new projectile object each time it is called.
    An instance of it will reside in a weapon object."""

    def __init__(self, ptype, speed, image, warhead):
        pass


class Projectile(GameEntity):
    """A moving entity that travels along a direction vector and despawns
    once it has covered ``max_distance`` from its origin."""

    def __init__(self, world, name, image, location, direction_vec, speed=200, damage=0, max_distance=300, owner=None):
        super().__init__(world, name, image, location, None, speed)
        self.direction = direction_vec        # unit direction of travel
        self.damage = damage
        self.origin = location                # where the projectile started
        self.max_distance = max_distance      # despawn range from origin
        self.owner = owner                    # entity that fired this

    def process(self, seconds_passed):
        """Advance the projectile; remove it once it exceeds its range."""
        if self.location.distance_to(self.origin) >= self.max_distance:
            self.world.remove_entity(self)
            return
        self.location += self.direction * self.speed * seconds_passed

    def render(self, surface):
        """Draw the sprite if one exists, otherwise a 1px yellow dot."""
        if self.image is not None:
            super().render(surface)
            return
        pygame.draw.circle(surface, YELLOW, (int(self.location.x), int(self.location.y)), 1)

    @staticmethod
    def factory(type_name, world, owner, weapon):
        """Build a projectile of *type_name* fired by *owner* from *weapon*.

        Raises ValueError for unknown type names.
        """
        # Turrets aim with turret_angle; everything else uses its body angle.
        angle = owner.angle if not hasattr(owner, 'turret_angle') else owner.turret_angle
        angle *= -1  # Multiply by -1 to fix direction vector
        # Random spread is applied in degrees around the aim direction.
        direction = Vector2(1, 0).rotate(math.degrees(angle) + uniform(-weapon.spread / 2, weapon.spread / 2))
        if type_name == 'bullet':
            return Projectile(world, 'bullet', None, owner.location, direction, speed=500, damage=weapon.damage,
                              owner=owner)
        raise ValueError('Unknown projectile type name {}'.format(type_name))


class Warhead:
    # Placeholder; see tests/test_warhead.py for the intended attributes.
    pass


class WeaponSimplified(SentientEntity):
    """A simple weapon that fires without reload; just a delay in between."""

    # NOTE(review): subclasses SentientEntity but never calls
    # super().__init__ -- confirm no base-class state is relied upon.
    def __init__(self, world, owner, fire_rate, damage, ammo, spread=0):
        self.world = world
        self.owner = owner
        self.fire_rate = fire_rate            # shots per second
        self.damage = damage                  # copied onto each bullet
        self.ammo = ammo
        self.accumulator = 0                  # seconds since last reset
        self.spread = spread                  # spread in degrees
        self.ready_to_fire = True

    def render(self, surface):
        # The weapon itself has no visual representation.
        return

    def process(self, seconds_passed):
        """Accumulate cooldown time until the weapon becomes ready again."""
        if self.ammo <= 0:
            self.accumulator = 0
            return
        if self.ready_to_fire:
            return
        # Once a full firing period has accumulated, the weapon is ready.
        if self.accumulator >= 1 / self.fire_rate:
            self.accumulator = 0
            self.ready_to_fire = True
        self.accumulator += seconds_passed

    def fire(self):
        """Spawn one bullet into the world and start the cooldown."""
        if not self.ready_to_fire or self.ammo <= 0:
            return
        self.ready_to_fire = False
        bullet = Projectile.factory('bullet', self.world, self.owner, self)
        self.world.add_entity(bullet)
        self.ammo -= 1
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Tarun-yadav777/Djanjo-Project-Schemer-
refs/heads/master
{"/schemegen/admin.py": ["/schemegen/models.py"], "/schemegen/views.py": ["/schemegen/models.py"]}
└── └── schemegen ├── admin.py ├── migrations │ ├── 0001_initial.py │ ├── 0002_delete_genre.py │ └── 0003_auto_20201116_1718.py ├── models.py ├── urls.py └── views.py
/schemegen/admin.py
from django.contrib import admin from .models import Schemegen, User_info admin.site.register(Schemegen) admin.site.register(User_info)
/schemegen/migrations/0001_initial.py
# Generated by Django 3.0.8 on 2020-11-13 11:23 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Genre', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Schemegen', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200)), ('type', models.CharField(choices=[("W's D", "Women's Development"), ('S J', 'Social Justice'), ('S', 'Sports'), ('R D', 'Ruler Development'), ("C's D", 'Child Development')], default="Women's Development", max_length=20)), ('info_link', models.URLField()), ], ), migrations.CreateModel( name='User_info', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200)), ('gender', models.CharField(max_length=6)), ('dob', models.DateField()), ('address', models.CharField(max_length=100)), ('phone_no', models.IntegerField()), ('interested_scheme', models.CharField(max_length=200)), ], ), ]
/schemegen/migrations/0002_delete_genre.py
# Generated by Django 3.0.8 on 2020-11-13 11:25 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('schemegen', '0001_initial'), ] operations = [ migrations.DeleteModel( name='Genre', ), ]
/schemegen/migrations/0003_auto_20201116_1718.py
# Generated by Django 3.0.8 on 2020-11-16 11:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('schemegen', '0002_delete_genre'), ] operations = [ migrations.AlterField( model_name='schemegen', name='type', field=models.CharField(choices=[("Women's Development", "Women's Development"), ('Social Justice', 'Social Justice'), ('Sports', 'Sports'), ('Ruler Development', 'Ruler Development'), ('Child Development', 'Child Development')], default="Women's Development", max_length=20), ), ]
/schemegen/models.py
from django.db import models class Schemegen(models.Model): choices = [ ("Women's Development", "Women's Development"), ("Social Justice", "Social Justice"), ("Sports", "Sports"), ("Ruler Development", "Ruler Development"), ("Child Development", "Child Development") ] name = models.CharField(max_length=200) type = models.CharField(max_length=20, choices=choices, default="Women's Development") info_link = models.URLField(max_length=200) def __str__(self): return self.name class User_info(models.Model): name = models.CharField(max_length=200) gender = models.CharField(max_length=6) dob = models.DateField(auto_now=False) address = models.CharField(max_length=100) phone_no = models.IntegerField() interested_scheme = models.CharField(max_length=200) def __str__(self): return self.name
/schemegen/urls.py
from django.urls import path from schemegen import views urlpatterns = [ path('', views.index, name='index'), path('adhaar', views.adhaar, name='adhaar'), path('requirements', views.requirements, name='requirements'), path('detail', views.detail, name='detail'), path('adhaar-hindi', views.adhaar_hindi, name='adhaar_hindi'), path('requirements-hindi', views.requirements_hindi, name='requirements-hindi'), ]
/schemegen/views.py
from django.shortcuts import render from django.http import HttpResponse from .models import Schemegen def index(request): return render(request, 'Index.html') def adhaar(request): return render(request, 'Adhaar.html') def requirements(request): return render(request, 'Requirements.html') def detail(request): type = request.GET['occupation'] schemes = Schemegen.objects.filter(type=type) return render(request, 'detail.html', {'schemes': schemes}) def adhaar_hindi(request): return render(request, 'adhaar_hindi.html') def requirements_hindi(request): return render(request, 'requirements_hindi.html')
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
lqy9860/qy_code
refs/heads/master
{"/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/main.py": ["/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/engine.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/engine.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/computergothread.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/usergothread.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessboard.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/computergothread.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/usergothread.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessboard.py"], "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/main_tcpserver.py": ["/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/engine.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/engine.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/usergothread.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessboard.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/usergothread.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessboard.py"], "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessboard.py": ["/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessman.py"], 
"/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/engine.py": ["/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/gobang2_client/chessboard.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessman.py", "/\u7f51\u7edc\u53cc\u4eba\u5bf9\u6218\u4e94\u5b50\u68cb/Gobang_tcp/chessboard.py"]}
└── └── 网络双人对战五子棋 ├── Gobang_tcp │ ├── chessboard.py │ ├── chessman.py │ ├── computergothread.py │ ├── engine.py │ ├── main.py │ ├── main_tcpserver.py │ ├── test.py │ └── usergothread.py └── gobang2_client ├── chessboard.py ├── chessman.py ├── clientRecv.py ├── computergothread.py ├── engine.py ├── main.py └── usergothread.py
/网络双人对战五子棋/Gobang_tcp/chessboard.py
from chessman import ChessMan class ChessBoard(object): BOAED_SIZE=15 def __init__(self): # 初始化 self.__board=[[0 for i in range(ChessBoard.BOAED_SIZE+1)] for i in range(ChessBoard.BOAED_SIZE+1)] # 清空棋盘 def initBoard(self): # 忽略第一行 for i in range(1,ChessBoard.BOAED_SIZE+1): for j in range(1,ChessBoard.BOAED_SIZE+1): self.__board[i][j]='+' # 打印棋盘 def printBoard(self): # 打印行号 print(' ',end='') for i in range(1, ChessBoard.BOAED_SIZE + 1): print(chr(i+ord('a')-1),end='') print() for i in range(1, ChessBoard.BOAED_SIZE + 1): # 打印列号 print('%2d'%i,end='') for j in range(1, ChessBoard.BOAED_SIZE + 1): print(self.__board[i][j],end='') print() def setChess(self,pos,color): # 放置棋子 if not isinstance(pos,tuple) and not isinstance(pos,list): raise Exception("第一个参数被选为元组或列表") if pos[0] <= 0 or pos[0]>ChessBoard.BOAED_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1]>ChessBoard.BOAED_SIZE: raise Exception('下标越界') self.__board[pos[0]][pos[1]]=color def setChessMan(self,chessman): if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') pos=chessman.Pos color=chessman.Color self.setChess(pos,color) def getChess(self,pos): if not isinstance(pos, tuple) and not isinstance(pos, list): raise Exception("第一个参数被选为元组或列表") if pos[0] <= 0 or pos[0] > ChessBoard.BOAED_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1] > ChessBoard.BOAED_SIZE: raise Exception('下标越界') return self.__board[pos[0]][pos[1]] def isEmpty(self,pos): if not isinstance(pos, tuple) and not isinstance(pos, list): raise Exception("第一个参数被选为元组或列表") if pos[0] <= 0 or pos[0] > ChessBoard.BOAED_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1] > ChessBoard.BOAED_SIZE: raise Exception('下标越界') return self.getChess(pos)=='+'
/网络双人对战五子棋/Gobang_tcp/chessman.py
import threading class ChessMan(object): # 棋子类 def __init__(self): self.__pos=[0,0] self.__color='+' self.con=threading.Condition() def NOTIFY(self): # 对notify进行封装 self.con.acquire() self.con.notify() self.con.release() def WAIT(self): # 对wait进行封装 self.con.acquire() self.con.wait() self.con.release() def setPos(self,pos): # 设置棋子位置 self.__pos=pos def getPos(self): # 获取棋子位置 return self.__pos def setColor(self, color): # 设置棋子颜色 self.__color = color def getColor(self): # 获取棋子颜色 return self.__color # 装饰器,先return再setter @property def Pos(self): return self.__pos # 根据前面的方法命名 @Pos.setter def Pos(self,pos): self.__pos=pos @property def Color(self): return self.__color @Color.setter def Color(self,color): self.__color=color
/网络双人对战五子棋/Gobang_tcp/computergothread.py
import threading import re class ComputerGoThread(threading.Thread): def __init__(self, chessman, engine,client_socker): super().__init__() self.chessman = chessman self.engine = engine self.client_socker=client_socker self.con = threading.Condition() def run(self): while 1: # 1.wait self.chessman.WAIT() # 2.电脑下棋 self.engine.computerGo(self.chessman) # 发送棋子位置 msg = str(self.chessman.Pos[0]) + str(',') + str(self.chessman.Pos[1]) self.client_socker.send(msg.encode('gbk')) # 3.notify self.chessman.NOTIFY() class RECV_ComputerGoThread(threading.Thread): def __init__(self,chessman,engine,client_socker): super().__init__() self.chessman=chessman self.engine=engine self.client_socker=client_socker self.con=threading.Condition() def run(self): while 1: # 1.wait self.chessman.WAIT() # 2.接收电脑下棋 recv_data = self.client_socker.recv(1024).decode('gbk') if len(recv_data) != 0 and recv_data != None: # 正则表达式判断输入的格式 (1-15,a-o或1-15) pattern = '^([1-9]|1[0-5]),([a-o]|[1-9]|1[0-5])$' ret = re.findall(pattern, recv_data) print(ret) if len(ret): posX, posY = ret[0] posX = int(posX) # 如果第二个参数是字母,进行转数字的处理 if posY.isalpha(): posY = ord(posY) - ord('a') + 1 else: posY = int(posY) # print(posX,posY) self.chessman.Pos = (posX, posY) # 3.notify self.chessman.NOTIFY()
/网络双人对战五子棋/Gobang_tcp/engine.py
import random import re from chessboard import ChessBoard from chessman import ChessMan class Engine(object): def __init__(self,chessboard): self.__chessboard=chessboard def computerGo(self,chessman): if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') # 电脑随机下棋 # 先判断是否为空 while 1: posX=random.randint(1,15) posY=random.randint(1,15) # 如果为空,获取棋子位置并退出循环 if self.__chessboard.isEmpty((posX,posY)): chessman.Pos=(posX,posY) print('电脑下棋位置:',posX,posY) break def userGo(self,chessman,userInput): if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') # 用户下棋 # 正则表达式判断输入的格式 (1-15,a-o或1-15) pattern='^([1-9]|1[0-5]),([a-o]|[A-O]|[1-9]|1[0-5])$' ret=re.findall(pattern,userInput) if len(ret): posX,posY=ret[0] posX=int(posX) # 如果第二个参数是字母,进行转数字的处理 if posY.isalpha() and ord(posY) >= 97: posY = ord(posY) - ord('a') + 1 elif posY.isalpha() and ord(posY) >= 65: posY = ord(posY) - ord('A') + 1 else: posY=int(posY) # 如果位置为空,设置棋子位置,并返回True if self.__chessboard.isEmpty((posX,posY)): chessman.Pos=(posX,posY) print('用户下棋位置:',posX, posY) return True return False def isWon(self,pos,color): if not isinstance(pos,tuple) and not isinstance(pos,list): raise Exception("第一个参数被选为元组或列表") if pos[0] <= 0 or pos[0]>ChessBoard.BOAED_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1]>ChessBoard.BOAED_SIZE: raise Exception('下标越界') # print("棋子位置",pos[0],pos[1]) # 判断下某一颗棋子后是否赢 # 上下 count = 0 # 开始标志 startX=1 if pos[0] -4>=1: startX=pos[0]-4 # 结束标志 endX=ChessBoard.BOAED_SIZE if pos[0] +4<=ChessBoard.BOAED_SIZE: endX=pos[0]+4 # posX范围 for posX in range(startX,endX+1): if self.__chessboard.getChess((posX,pos[1]))==color : count +=1 if count == 5: return True else: count=0 # 左右 # 开始标志 startY=1 if pos[1] -4>=1: startY=pos[1]-4 # 结束标志 endY=ChessBoard.BOAED_SIZE if pos[1] +4<=ChessBoard.BOAED_SIZE: endY=pos[1]+4 # posY范围 for posY in range(startY,endY+1): if self.__chessboard.getChess((pos[0],posY))==color : count +=1 if count == 5: return True else: count=0 # 左上右下 # 
将棋盘划分为两部分,x>y和x<y # 开始标志 startX=1 if pos[0] >= pos[1] and pos[1]-4<=1 : startX=pos[0]-pos[1]+1 elif pos[0] -4 >= 1 : startX = pos[0] - 4 # 结束标志 endX=ChessBoard.BOAED_SIZE if pos[0] <= pos[1] and pos[1]+4 >=ChessBoard.BOAED_SIZE: endX=15-(pos[1]-pos[0]) elif pos[0] <=ChessBoard.BOAED_SIZE-4: endX = pos[0] + 4 # posX范围 # print("左上右下范围",startX,endX) for posX in range(startX,endX+1): posY = pos[1] - (pos[0] - posX) # print(posX,posY) if self.__chessboard.getChess((posX,posY))==color : count +=1 if count == 5: return True else: count = 0 # 左下右上 # 将棋盘划分为两部分,(x+y>15)和(x+y<15) # 开始标志 startX=1 if pos[1]>=10 and pos[0]+pos[1]>15: startX=pos[0]+pos[1]-15 elif pos[0] - 4 >= 1 : startX = pos[0] - 4 # 结束标志 endX=ChessBoard.BOAED_SIZE if pos[1]<=5 and pos[0]+pos[1]<=15 : endX=pos[1] +pos[0]-1 elif pos[0] +4<=ChessBoard.BOAED_SIZE: endX = pos[0] + 4 # posX范围 # print("左下右上范围",startX,endX) for posX in range(startX,endX+1): posY = pos[1] + (pos[0] - posX) # print(posX,posY) if self.__chessboard.getChess((posX,posY))==color : count +=1 if count == 5: return True else: count=0 return False def isWonMan(self,chessman): if not isinstance(chessman.Pos,tuple) and not isinstance(chessman.Pos,list): raise Exception("第一个参数被选为元组或列表") if chessman.Pos[0] <= 0 or chessman.Pos[0]>ChessBoard.BOAED_SIZE: raise Exception('下标越界') if chessman.Pos[1] <= 0 or chessman.Pos[1]>ChessBoard.BOAED_SIZE: raise Exception('下标越界') # 判断某一方下子后是否赢 if not isinstance(chessman, ChessMan): raise Exception('第一个参数必须为ChessMan对象') pos=chessman.Pos color=chessman.Color return self.isWon(pos,color) def play(self): # 游戏主流程 state=True # 外循环 while 1: computerchessman = ChessMan() userchessman = ChessMan() # 用户选择先手或后手 order=input('先手1,后手2:') if order=='1': # 初始化棋子类 userchessman.Color = 'o' computerchessman.Color = 'x' state=True else: userchessman.Color = 'x' computerchessman.Color = 'o' state=False # 清空棋盘 self.__chessboard.initBoard() # 内循环 while 1: # 是否到用户下 if state: # 获取用户下棋位置 userInput = input("请输入用户下棋位置:") 
ret=self.userGo(userchessman, userInput) if ret: # 放置用户下棋位置 self.__chessboard.setChessMan(userchessman) # 打印棋盘 self.__chessboard.printBoard() # 是否赢了 if self.isWonMan(userchessman): # 退出外循环 print('uwin') break else: state=False else: continue # 电脑下 else: # 获取电脑随机下棋位置 self.computerGo(computerchessman) # 放置电脑下棋位置 self.__chessboard.setChessMan(computerchessman) # 打印棋盘 self.__chessboard.printBoard() # 是否赢了 if self.isWonMan(computerchessman) : # 退出外循环 print('cwin') break else: state=True cont=input('是否继续(1继续)') # 是否继续 if not (cont=='1'): break
/网络双人对战五子棋/Gobang_tcp/main.py
from chessboard import ChessBoard from chessman import ChessMan from engine import Engine from usergothread import UserGoThread from computergothread import ComputerGoThread def test1(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() chessboard.printBoard() def test2(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 在3,5位置放置x chessboard.setChess((3,5),'x') # 用棋子类在4,7放置o # 初始化棋子类 chessman=ChessMan() chessman.Pos=(4,7) chessman.Color='o' # 放置 chessboard.setChessMan(chessman) chessboard.printBoard() # 测试获取指定棋子位置 ret=chessboard.getChess((4,11)) print(ret) # 测试是否为空 ret= chessboard.isEmpty((1,1)) print(ret) def test3(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 初始化棋子类 chessman=ChessMan() chessman.Color='o' # 初始化引擎类 engine=Engine(chessboard) # 获取电脑随机下棋位置 engine.computerGo(chessman) # 放置电脑下棋位置 chessboard.setChessMan(chessman) # 打印棋盘 chessboard.printBoard() def test4(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 初始化棋子类 chessman=ChessMan() chessman.Color='o' # 初始化引擎类 engine=Engine(chessboard) # 获取用户下棋位置 userInput=input("请输入用户下棋位置:") engine.userGo(chessman,userInput) # 放置用户下棋位置 chessboard.setChessMan(chessman) # 打印棋盘 chessboard.printBoard() def test5(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 初始化棋子类 chessman=ChessMan() chessman.Color='o' # 初始化引擎类 engine=Engine(chessboard) while 1: # 获取用户下棋位置 userInput=input("请输入用户下棋位置:") engine.userGo(chessman,userInput) # 放置用户下棋位置 chessboard.setChessMan(chessman) # 打印棋盘 RET=engine.isWonMan(chessman) chessboard.printBoard() if RET: print('win') break def main(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 初始化引擎类 engine = Engine(chessboard) engine.play() def mainThread(): # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 初始化引擎类 engine = Engine(chessboard) userchessman = ChessMan() userchessman.Color='x' computerchessman = ChessMan() computerchessman.Color = 'o' # 创建线程和开启线程 tU=UserGoThread(userchessman,engine) 
tC=ComputerGoThread(computerchessman,engine) # z设置守护线程,主线程退出,子线程自动退出 tU.setDaemon(True) tC.setDaemon(True) tU.start() tC.start() while 1: #1.用户wait userchessman.WAIT() #2.用户下子 # 放置用户下棋位置 chessboard.setChessMan(userchessman) # 打印棋盘 chessboard.printBoard() # 判断用户是否赢 if engine.isWonMan(userchessman): print("u win") break #3.电脑notify computerchessman.NOTIFY() # 电脑子线程自动完成 #4.1电脑wait computerchessman.WAIT() #5.1电脑下子 # 放置电脑下棋位置 chessboard.setChessMan(computerchessman) # 打印棋盘 chessboard.printBoard() # 判断电脑是否赢 if engine.isWonMan(computerchessman): print("c win") break #6.1用户notify userchessman.NOTIFY() # 用户子线程自动完成 if __name__ == '__main__': # mainThread() test5()
/网络双人对战五子棋/Gobang_tcp/main_tcpserver.py
import socket import threading from chessboard import ChessBoard from chessman import ChessMan from engine import Engine from usergothread import RECV_UserGoThread,UserGoThread if __name__ == '__main__': # tcp套接字 server_socker = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 绑定端口 addr = ('', 8000) server_socker.bind(addr) # 开启监听 server_socker.listen() print('开始准备,等待连接') # 接收客户端连接 client_socker, client_info = server_socker.accept() print("客户端%s准备完毕" % client_info[0]) try: while 1: # 初始化棋盘 chessboard = ChessBoard() chessboard.initBoard() # 初始化引擎类 engine = Engine(chessboard) # 初始化两个棋子类 Firstchessman = ChessMan() Latterchessman = ChessMan() # 客户端选择先手或后手 # '先手1,后手其他:' print('对方(用户)选择先后手:', end='') order = client_socker.recv(1024).decode() Firstchessman.Color = 'o' Latterchessman.Color = 'x' if order == '1': # 对方先手,开启用户接收线程 print('对方先手') tU = RECV_UserGoThread(Firstchessman, engine, client_socker) tC = UserGoThread(Latterchessman, engine, client_socker) else: print('我方先手') tU = UserGoThread(Firstchessman, engine, client_socker) tC = RECV_UserGoThread(Latterchessman, engine, client_socker) # 设置守护线程,主线程退出,子线程自动退出 tU.setDaemon(True) tC.setDaemon(True) tU.start() tC.start() # 先手notify一下 Firstchessman.NOTIFY() while 1: # 1.先手等待 Firstchessman.WAIT() # 2.先手下子 # 放置先手下棋位置 chessboard.setChessMan(Firstchessman) # 打印棋盘 chessboard.printBoard() # 判断先手是否赢 if engine.isWonMan(Firstchessman): print("先手 win") break # 3.后手notify Latterchessman.NOTIFY() # 后手子线程自动完成 # 4.1电脑wait Latterchessman.WAIT() # 5.1后手下子 # 放置对方下棋位置 chessboard.setChessMan(Latterchessman) # 打印棋盘 chessboard.printBoard() # 判断后手是否赢 if engine.isWonMan(Latterchessman): print("后手 win") break # 6.1先手notify Firstchessman.NOTIFY() # 用户子线程自动完成 print("是否继续,继续选1(对方选择)") cont = client_socker.recv(1024).decode() if not (cont == '1'): print('退出') break else: print('继续') except Exception as e: print(e) # print(client_info[0], "断开连接")
/网络双人对战五子棋/Gobang_tcp/test.py
import pygame import time def isWon(): bcolor=[(250, 198, 115, 255),(250, 199, 108, 255),(249, 201, 101, 255)] wcolor = [(195, 164, 142, 255), ] if __name__ == '__main__': # 初始化 pygame.init() #创建窗口(必须) # set_mode会返回一个Surface对象,代表了在桌面上出现的那个窗口,三个参数第一个为元祖,代表分 辨率(必须);第二个是一个标志位,具体意思见下表,如果不用什么特性,就指定0;第三个为色深。 # RESIZABLE 创建一个可以改变大小的窗口 screen=pygame.display.set_mode((500,481),pygame.RESIZABLE,32) # 每格大小33x32 #获取背景图片 background_img=pygame.image.load(r'H:\pei_xun_python\wenjiang\0730\wzq\img\board.jpg') # 设置时钟 clock = pygame.time.Clock() # 棋子颜色切换 chess_state=True i=0 # 棋子图片字典 w={} b={} # 棋子位置存储 setwX=[] setwY=[] setbX=[] setbY=[] while True: # 监听事件 for event in pygame.event.get(): # 设置退出事件 if event.type== pygame.QUIT: exit() # 设置背景图片位置 screen.blit(background_img, (0, 0)) w [i]= pygame.image.load(r'H:\pei_xun_python\wenjiang\0730\wzq\img\w_chess.png') b [i]=pygame.image.load(r'H:\pei_xun_python\wenjiang\0730\wzq\img\b_chess.png') x,y=pygame.mouse.get_pos() # 如果chess_state为True,白棋,否则黑棋 if chess_state: screen.blit(w[0],(x-16.5,y-16)) else: screen.blit(b[0], (x-16.5,y-16)) # 获取键盘事件 key = pygame.key.get_pressed() # 按下Enter键放置棋子 if key[pygame.K_RETURN or pygame.K_SPACE]: x-=x%33 -5 y-=y%32 # 如果为True,白棋位置增加,否则黑棋位置增加 if chess_state: setwX.append(x) setwY.append(y) print(setwX[len(setwX) - 1], setwY[len(setwX) - 1], i) else: setbX.append(x) setbY.append(y) print(setwX[len(setbX) - 1], setwY[len(setbX) - 1], i) chess_state=not chess_state col = screen.get_at((x, y)) print(col) i+=1 time.sleep(0.5) for j in range(len(setwX)): screen.blit(w[j],(setwX[j],setwY[j])) for j in range(len(setbX)): screen.blit(b[j],(setbX[j],setbY[j])) #刷新画面 pygame.display.flip() # 设置刷新频率,每秒刷新n次 clock.tick(10)
/网络双人对战五子棋/Gobang_tcp/usergothread.py
import threading import time import re class RECV_UserGoThread(threading.Thread): def __init__(self,chessman,engine,client_socker): super().__init__() self.chessman=chessman self.engine=engine self.client_socker=client_socker self.con=threading.Condition() def run(self): # 先等待,解决先后手问题 self.chessman.WAIT() try: while 1: # 1.用户下棋 print('轮到对方下棋') # 获取用户下棋位置 recv_data = self.client_socker.recv(1024).decode('gbk') print('对方下棋位置:',recv_data) if len(recv_data) != 0 and recv_data != None: # 正则表达式判断输入的格式 (1-15,a-o或1-15) pattern = '^([1-9]|1[0-5]),([a-o]|[A-O]|[1-9]|1[0-5])$' ret = re.findall(pattern, recv_data) # print(ret) if len(ret): posX, posY = ret[0] posX = int(posX) # 如果第二个参数是字母,进行转数字的处理 if posY.isalpha() and ord(posY)>=97 : posY = ord(posY) - ord('a') + 1 elif posY.isalpha() and ord(posY)>=65: posY = ord(posY) - ord('A') + 1 else: posY = int(posY) self.chessman.Pos = (posX, posY) # 2.notify self.chessman.NOTIFY() # 3.wait self.chessman.WAIT() except Exception as e: print(e) class UserGoThread(threading.Thread): def __init__(self,chessman,engine,client_socker): super().__init__() self.chessman=chessman self.engine=engine self.client_socker=client_socker self.con=threading.Condition() def run(self): # 先等待,解决先后手问题 self.chessman.WAIT() try: while 1: # time.sleep(1) print('轮到我方下棋') # 1.用户下棋 # 获取用户下棋位置 userInput = input("请输入下棋位置:") ret=self.engine.userGo(self.chessman, userInput) if ret: # 发送棋子坐标 self.client_socker.send(userInput.encode('gbk')) # 2.notify self.chessman.NOTIFY() # 3.wait self.chessman.WAIT() else: print('输入格式错误或棋子重复') except Exception as e: print(e)
/网络双人对战五子棋/gobang2_client/chessboard.py
# -*- coding: utf-8 -*- # @Time : 2020/7/30 9:21 # @Author : LQY """ chessboard""" from chessman import * class ChessBoard(object): # 类属性 BOARD_SIZE = 15 # 棋盘的大小 def __init__(self): # 棋盘的下标从0到15,申请内存 self.__board = [[0 for i in range(ChessBoard.BOARD_SIZE+1)] for i in range(ChessBoard.BOARD_SIZE+1)] def initBoard(self): '''清空棋盘''' #直接忽略第0行 for i in range(1,ChessBoard.BOARD_SIZE+1): # 直接忽略第0列 for j in range(1, ChessBoard.BOARD_SIZE + 1): self.__board[i][j] = '+' def printBoard(self): '''打印棋盘''' # 打印列号 print(' ',end='') for j in range(1, ChessBoard.BOARD_SIZE + 1): c = chr(j + ord('a')-1) #转换成字母ABCD... print(c,end=' ') print() # 打印整个棋盘 for i in range(1,ChessBoard.BOARD_SIZE + 1): # 打印行号 print('%2d' %i,end='') for j in range(1, ChessBoard.BOARD_SIZE + 1): print(self.__board[i][j],end=' ') print() def setChess(self,pos,color): ''' 在棋盘上放置棋子 :param pos: 棋子的位置,该值是一个长度为2的元组 :param color: 棋子的颜色‘x'或’o' :return: ''' if not isinstance(pos,tuple) and not isinstance(pos,list): raise Exception('第一个参数必须为元组或列表') # 抛出异常 if pos[0] <= 0 or pos[0] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') self.__board[pos[0]][pos[1]] = color # 放置棋子 def setChessMan(self,chessman): ''' 在棋盘上放置棋子 :param chessman: 棋子对象,需要包含棋子颜色和位置 :return: ''' if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') pos = chessman.getPos() #接收棋子对象的位置 color = chessman.getColor() # 接收棋子对象的颜色 self.setChess(pos,color) def getChess(self,pos): ''' 根据坐标读取棋子 :param pos: 棋子的位置 :return: 棋子的颜色;x或o或+ ''' if not isinstance(pos,tuple) and not isinstance(pos,list): raise Exception('第一个参数必须为元组或列表') # 抛出异常 if pos[0] <= 0 or pos[0] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') return self.__board[pos[0]][pos[1]] def isEmpty(self,pos): ''' 判断某个坐标点是否为空 :param pos: 坐标位置 :return: True空,False不空 ''' if not isinstance(pos,tuple) and not isinstance(pos,list): raise 
Exception('第一个参数必须为元组或列表') # 抛出异常 if pos[0] <= 0 or pos[0] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') return self.getChess(pos) == '+'
/网络双人对战五子棋/gobang2_client/chessman.py
# -*- coding: utf-8 -*- # @Time : 2020/7/30 10:25 # @Author : LQY """ chessname""" import threading class ChessMan(object): ''' 棋子类 ''' def __init__(self): self.__pos = [0,0] self.__color = '+' self.con = threading.Condition() def setPos(self,pos): '''指定棋子位置''' self.__pos = pos def getPos(self): '''返回棋子的位置''' return self.__pos def setColor(self,color): '''指定棋子的颜色''' self.__color = color def getColor(self): '''返回棋子的位置''' return self.__color def doWait(self): self.con.acquire() self.con.wait() self.con.release() def doNotify(self): self.con.acquire() self.con.notify() self.con.release()
/网络双人对战五子棋/gobang2_client/clientRecv.py
# -*- coding: utf-8 -*- # @Time : 2020/8/1 12:58 # @Author : LQY """ clientserverRecv""" import threading from chessman import * from engine import * from chessboard import * import re class ClientRecvThread(threading.Thread): def __init__(self,chessboard,chesswhite,engine,client_socket): # 服务端收发要用客户端的socket '''初始化''' super().__init__() self.chessboard = chessboard self.chesswhite = chesswhite self.engine = engine self.client_socket = client_socket def run(self): # 等待主线程唤醒 self.chesswhite.doWait() while True: # 接收服务端发来的坐标信息 recv_pos = self.client_socket.recv(1024).decode('gbk') pattern = '^([1-9]|1[0-5]),([a-o]|[A-O]|[1-9]|1[0-5])$' ret = re.findall(pattern, recv_pos) if len(ret): posX, posY = ret[0] posX = int(posX) # 如果第二个参数是字母,进行转数字的处理 if posY.isalpha() and ord(posY) >= 97: posY = ord(posY) - ord('a') + 1 elif posY.isalpha() and ord(posY) >= 65: posY = ord(posY) - ord('A') + 1 else: posY = int(posY) # 判断是否为空 if self.chessboard.isEmpty((posX, posY)): self.chesswhite.setPos((posX, posY)) print("对方发过来的坐标是:", ret) # 3 电脑notify self.chesswhite.doNotify() # 1 电脑wait self.chesswhite.doWait()
/网络双人对战五子棋/gobang2_client/computergothread.py
# -*- coding: utf-8 -*- # @Time : 2020/7/31 10:29 # @Author : LQY """ computergothread""" import threading from chessman import * from engine import * from chessboard import * import re class ComputerGoThread(threading.Thread): '''电脑下棋的线程''' def __init__(self,chessboard,chessmanUser,engine,client_socket): # 服务端收发要用客户端的socket '''初始化''' super().__init__() self.chessboard = chessboard self.chessmanUser = chessmanUser self.engine = engine self.client_socket = client_socket def run(self): '''子线程执行的代码''' # address = ('', 9860) # self.server_socket.bind(address) # 绑定服务端地址和端口号 # # self.server_socket.listen(5) # 监听,最大连接数5 # # # 一直等待客户端连接,连接成功后则创建一个线程 # client_socket, client_info = self.server_socket.accept() # 申请连接 # 连接服务端 # address = ('192.168.55.29', 8000) # 对手的ip地址和端口号 # self.client_socket.connect(address) while True: # 1 电脑wait self.chessmanUser.doWait() # # 2 电脑下棋 # self.engine.computerGo(self.chessmanPC) # # # 接收客户端发来的坐标信息 # recv_pos = client_socket.recv(1024) # print("我是服务端,对方发过来的坐标是:",recv_pos.decode('gbk')) # # # self.chessboard.setChessMan(list(recv_pos.decode('gbk'))) # # # 下完棋后给客户端发送坐标信息 # ret = str(self.chessmanPC.getPos()).encode('gbk') # client_socket.send(ret) # 接收服务端发来的坐标信息 recv_pos = self.client_socket.recv(1024).decode('gbk') pattern = '^([1-9]|1[0-5]),([a-o])$' ret = re.findall(pattern, recv_pos) # print(ret) if ret: # 判断是否匹配成功 posX, posY = ret[0] # ret[0]是一个元组,把值依次传递 posX = int(posX) posY = ord(posY) - ord('a') + 1 # 判断是否为空 if self.chessboard.isEmpty((posX, posY)): self.chessmanUser.setPos((posX, posY)) # chessman.setColor('X') # 没有匹配到或者位置不空 print("我是客户端,对方发过来的坐标是:", ret) # 3 电脑notify self.chessmanUser.doNotify()
/网络双人对战五子棋/gobang2_client/engine.py
# -*- coding: utf-8 -*- # @Time : 2020/7/30 11:15 # @Author : LQY """ engine""" import random from chessman import * import re from chessboard import * class Engine(): def __init__(self,chessboard): self.__chessboard = chessboard def computerGo(self,chessman): ''' 电脑在随机位置下棋 :param chessman: 棋子对象,里面已经设置好棋子颜色 :return: ''' if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') while True: # 电脑随机下棋 posX = random.randint(1,15) posY = random.randint(1,15) # 判断位置是否为空 if self.__chessboard.isEmpty((posX,posY)): print(f'电脑下棋的位置:({posX},{posY})') # 1如果下棋位置为空,则把位置写入棋子对象中 chessman.setPos((posX,posY)) # chessman.setColor('O') # 设置电脑下白棋 # 退出循环 break # def userGo(self,chessman): # # if not isinstance(chessman,ChessMan): # raise Exception('第一个参数必须为ChessMan对象') # # while True: # pos = input("请输入pos:") # posX = int(pos[0]) # 转换为整型 # posY = ord(pos[2])-ord('A') + 1 #转换成数字,注意pos[1]是‘,' # # 判断位置是否为空 # if self.__chessboard.isEmpty((posX,posY)): # print(f'人下棋的位置:({posX},{posY})') # # 1如果下棋位置为空,则把位置写入棋子对象中 # chessman.setPos((posX,posY)) # chessman.setColor('X') # # 退出循环 # break def userGo(self,chessman,userinput): ''' 人下棋 :param chessman: 棋子对象 :param userinput: 用户输入的字符串,如’5,A' :return: ''' if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') # 采用正则表达式进行匹配,一位数1-9或者两位数:1[0-5],要用括号括起来代表提取 pattern = '^([1-9]|1[0-5]),([a-o])$' ret = re.findall(pattern,userinput) # print(ret) if re: #判断是否匹配成功 posX,posY = ret[0] #ret[0]是一个元组,把值依次传递 posX = int(posX) posY = ord(posY)-ord('a')+1 # 判断是否为空 if self.__chessboard.isEmpty((posX, posY)): chessman.setPos((posX, posY)) # chessman.setColor('X') return True # 没有匹配到或者位置不空 return False def isWon(self,pos,color): ''' 判断当下某一颗棋子后是否赢棋 :param pos:棋子的位置 :param color:棋子的颜色 :return:True为赢,False胜负未分 ''' if not isinstance(pos,tuple) and not isinstance(pos,list): raise Exception('第一个参数必须为元组或列表') # 抛出异常 if pos[0] <= 0 or pos[0] > ChessBoard.BOARD_SIZE: raise Exception('下标越界') if pos[1] <= 0 or pos[1] > ChessBoard.BOARD_SIZE: raise 
Exception('下标越界') # 上下方向: 范围(pos[0]-4,pos[1])--(pos[0] + 4,pos[1]) start_x = 1 end_x = ChessBoard.BOARD_SIZE if pos[0] - 4 >= 1: start_x = pos[0] - 4 if pos[0] + 4 <= 15: end_x = pos[0] + 4 count = 0 for posX in range(start_x,end_x+1): if self.__chessboard.getChess((posX,pos[1])) == color: count += 1 if count >= 5: return True else: count = 0 # 左右方向:范围(pos[0],pos[1]-4)--(pos[0],pos[1] + 4) start_y = 1 end_y = self.__chessboard.BOARD_SIZE if pos[1] - 4 >= 1: start_y = pos[1] - 4 if pos[1] + 4 <= 15: end_y = pos[1] + 4 count = 0 for posY in range(start_y,end_y+1): if self.__chessboard.getChess((pos[0],posY)) == color: count += 1 if count >= 5: return True else: count = 0 # 左上右下 count = 0 s = pos[0] - pos[1] #计算行列间的差值 start = 0 end = 0 if pos[0] < pos[1]: #行比列小,s是负数,加在列 # 设取点为(5,6),从1开始到14结束,循环14次 start = 1 end = 15 + s if pos[0] == pos[1]: # 点都在对角线上,需要循环15次 start = 1 end = 15 if pos[0] > pos[1]: # 行比列大,s是正数,加在行 # 设取点(6,5),从2开始,15结束,循环14次 start = 1 + s end = 15 for i in range(start,end+1): if self.__chessboard.getChess((i,i-s)) == color: count += 1 if count >= 5: return True else: count = 0 # 左下右上 count = 0 s = pos[0] + pos[1] if s <= 16: # x+y<=16,设(5,6),循环10次,从1开始,到10,即(s-1)结束 start = start_x end = s - start_y for i in range(start, end + 1): if self.__chessboard.getChess((i,s - i)) == color: count += 1 if count >= 5: return True else: # 一旦断开 统计数清0 count = 0 if 16 < s <= 26: # x+y > 16,设(11,10),循环10次,从6,即(s%16+1)开始,15结束, start = s % 16 + 1 end = 15 for i in range(start, end + 1): if self.__chessboard.getChess((i,s - i)) == color: count += 1 if count >= 5: return True else: # 一旦断开 统计数清0 count = 0 # 四个条件均不满足 return False def isWonman(self,chessman): ''' 判断当下某一颗棋子后是否赢棋 :param chessman: 棋子对象,包括位置颜色 :return: True为赢,False胜负未分 ''' if not isinstance(chessman,ChessMan): raise Exception('第一个参数必须为ChessMan对象') pos = chessman.getPos() color = chessman.getColor() return self.isWon(pos,color) def play(self): '''游戏主流程''' userBlack = True # 用户选择黑棋则为True 用户选择白棋则为False 每盘棋改变一次 usergo = 
True # 轮到用户下则为True 轮到电脑下则为False 每步棋改变一次 chessmanUser = ChessMan() # 创建棋子对象 chessmanPc = ChessMan() while True: # 外循环 # 用户选择先后 user_sort = input("用户选择先后:(b代表黑棋先下,w代表白棋后下)") if user_sort == 'b': userBlack = True usergo = True else: userBlack = False usergo = False # 初始化棋盘 self.__chessboard.initBoard() # 方法不能直接Chessboard.initBoard(),因为定义的是实例方法,类只能用自己的类方法 # 判断是否轮到用户下 while True: # 内循环 # 如果用户选b,则用户先,下黑棋 if userBlack: chessmanUser.setColor('X') chessmanPc.setColor('O') else: chessmanUser.setColor('O') chessmanPc.setColor('X') if usergo: # 代表用户下 userinput = input("请用户输入下棋坐标:") user_ret = self.userGo(chessmanUser, userinput) if user_ret: # 返回真才把棋子传进chessman,并放置在棋盘上 self.__chessboard.setChessMan(chessmanUser) else: # 轮到电脑下 self.computerGo(chessmanPc) self.__chessboard.setChessMan(chessmanPc) self.__chessboard.printBoard() # 判断输赢 if usergo: user_iswon = self.isWonman(chessmanUser) if user_iswon: # 如果赢了判断是否继续游戏 print("你赢了") break # 如果赢棋就跳出内循环 else: com_iswon = self.isWonman(chessmanPc) if com_iswon: print("你输了") break # 如果赢棋就跳出内循环 usergo = not usergo # 判断是否继续游戏 user_contin = input("是否继续游戏:(是为y,否为n):") if user_contin == 'n': print("结束游戏") break # 结束游戏则跳出外循环 else: print("继续游戏")
/网络双人对战五子棋/gobang2_client/main.py
# -*- coding: utf-8 -*- # @Time : 2020/7/30 9:27 # @Author : LQY """ main""" from chessboard import * from chessman import * from engine import * import threading import time import random def test1(): chessboard = ChessBoard() chessboard.initBoard() chessboard.printBoard() def test2(): chessboard = ChessBoard() # 创建棋盘类对象 chessman = ChessMan() # 创建棋子类对象 # 清空棋盘 chessboard.initBoard() # 在(3,5)位置上放一颗黑棋 chessboard.setChess((3,5),'X') # 在(4,7)位置放一颗白棋 chessman.setPos((4,7)) chessman.setColor('O') chessboard.setChessMan(chessman) chessboard.printBoard() # 测试读取棋子 ret = chessboard.getChess((4,5)) print(ret) # 测试是否为空 ret = chessboard.isempty((4,14)) if ret: print('empty') else: print('not empty') def test3(): '''测试电脑下棋''' chessboard = ChessBoard() # 创建棋盘类对象 chessman = ChessMan() # 创建棋子类对象 engine = Engine(chessboard) #创建引擎对象,并把棋盘传进去 # 清空棋盘 chessboard.initBoard() # 电脑下棋 engine.computerGo(chessman) # 已将电脑的下棋位置传入chessman中 chessboard.setChessMan(chessman) # 棋盘把棋子放进对应位置 # 人下棋 while True: userinput = input("请输入下棋坐标:") ret = engine.userGo(chessman, userinput) if ret: # 返回真才把棋子传进chessman chessboard.setChessMan(chessman) # 打印棋盘 # chessboard.printBoard() break # 打印棋盘 chessboard.printBoard() def test4(): '''测试人下棋''' chessboard = ChessBoard() # 创建棋盘类对象 chessman = ChessMan() # 创建棋子类对象 engine = Engine(chessboard) # 创建引擎对象,并把棋盘传进去 # 清空棋盘 chessboard.initBoard() # 人下棋 userinput = input("请输入下棋坐标:") ret = engine.userGo(chessman,userinput) if ret: # 返回真才把棋子传进chessman chessboard.setChessMan(chessman) # 打印棋盘 chessboard.printBoard() def test5(): '''测试上下方向''' chessboard = ChessBoard() # 创建棋盘类对象 chessman = ChessMan() # 创建棋子类对象 engine = Engine(chessboard) # 清空棋盘 chessboard.initBoard() chessboard.setChess((3, 5), 'X') chessboard.setChess((4, 5), 'X') chessboard.setChess((5, 5), 'X') chessboard.setChess((6, 5), 'X') chessboard.setChess((7, 5), 'X') # chessboard.setChess((8, 5), 'X') # # 打印棋盘 chessboard.printBoard() # 判断输赢 ret = engine.isWon((3,5),'X') if ret: print("胜负已分") else: print("胜负未分") def 
test6(): '''测试左右方向''' chessboard = ChessBoard() # 创建棋盘类对象 chessman = ChessMan() # 创建棋子类对象 engine = Engine(chessboard) # 清空棋盘 chessboard.initBoard() chessboard.setChess((1, 1), 'X') chessboard.setChess((1, 2), 'X') # chessboard.setChess((1, 3), 'X') chessboard.setChess((1, 4), 'X') chessboard.setChess((1, 5), 'X') # chessboard.setChess((8, 5), 'X') # # 打印棋盘 chessboard.printBoard() # 判断输赢 ret = engine.isWon((1, 5), 'X') if ret: print("胜负已分") else: print("胜负未分") # def main(): # chessboard = ChessBoard() # engine = Engine(chessboard) # engine.play() from usergothread import * from computergothread import * import socket from clientRecv import * def mainThread(): # 创建客户端套接字 client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 创建服务端套接字 # server_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # computergothread = ComputerGoThread(chessboard,chessmanPC,engine,client_socket) # 连接服务端 address = ('192.168.55.29', 8000) # 对手的ip地址和端口号 client_socket.connect(address) while True: # 创建棋盘对象和引擎对象 chessboard = ChessBoard() engine = Engine(chessboard) # 初始化和打印棋盘 chessboard.initBoard() chessboard.printBoard() # 创建两个棋子对象 chessblack = ChessMan() chesswhite = ChessMan() chessblack.setColor('X') # 先手为黑棋 chesswhite.setColor('O') # 后手为白棋 # 判断先后 user_sort = input("请输入先后手:先为1") # 把先后信息发给对方 msg = user_sort.encode('gbk') client_socket.send(msg) if user_sort == '1': # 若选1,则自己下黑棋 # 把黑棋对象传给下棋进程,白棋对象传给接收线程 # 创建用户下棋线程和接收线程 print("我方先手") # 创建客户端下棋线程,传入chessblack唤醒UserGoThread usergothread = UserGoThread(chessblack, engine, client_socket) # 创建客户接收线程 clientrecvthread = ClientRecvThread(chessboard, chesswhite, engine, client_socket) else: # 若选0,则自己下白棋 # 把白棋对象传给下棋线程,黑棋对象传给接收线程 print("对方先手") usergothread = UserGoThread(chesswhite, engine, client_socket) clientrecvthread = ClientRecvThread(chessboard, chessblack, engine, client_socket) # 传入chessmanUser唤醒ClientRecvThread # 设置线程为守护线程,当主线程退出时子线程也随之退出 usergothread.setDaemon(True) # computergothread.setDaemon(True) 
clientrecvthread.setDaemon(True) # 开始线程 usergothread.start() # computergothread.start() clientrecvthread.start() # 先手(黑棋)notify # 若选1,唤醒usergothread中的wait # 若选0,唤醒clientrecvthread中的wait chessblack.doNotify() while True: # 1 用户wait chessblack.doWait() # 3 在棋盘上摆放用户下的棋子 chessboard.setChessMan(chessblack) chessboard.printBoard() # 判断输赢 if user_sort == '1': if engine.isWonman(chessblack): print("恭喜赢了") break if user_sort == '0': if engine.isWonman(chessblack): print("输了") break # 2 对方notify,唤醒客户端接收线程,接收对方的棋子 chesswhite.doNotify() # 4 电脑wait chesswhite.doWait() # 5 在棋盘上摆放对方下的棋子 chessboard.setChessMan(chesswhite) chessboard.printBoard() if user_sort == '1': if engine.isWonman(chesswhite): print("输了") break if user_sort == '0': if engine.isWonman(chesswhite): print("恭喜赢了") break # 6 用户notify chessblack.doNotify() # 是否继续游戏 userinput = input("是否继续游戏:是为1,否为0") # 把是否继续信息发给对方 msg = userinput.encode('gbk') client_socket.send(msg) if userinput == '0': break if __name__=='__main__': # test2() # test3() # test4() # test5() # test6() # main() mainThread()
/网络双人对战五子棋/gobang2_client/usergothread.py
# -*- coding: utf-8 -*- # @Time : 2020/7/31 10:29 # @Author : LQY """ usergothread""" import threading from chessman import * from engine import * from chessboard import * class UserGoThread(threading.Thread): '''用户下棋的线程''' def __init__(self,chessblack,engine,client_socket): '''初始化''' super().__init__() self.chessblack = chessblack self.engine = engine self.client_socket = client_socket def run(self): '''子线程执行的代码''' # 等待主线程唤醒 self.chessblack.doWait() while True: # 1 用户下棋 userinput = input("请用户输入下棋坐标:") ret = self.engine.userGo(self.chessblack,userinput) if ret: # 给服务端发消息说我方已下完棋,轮到对方下棋 # 向服务器发送信息,并传递我方下的棋子的坐标 print("我是客户端,我方已下完棋") # self.client_socket.send(msg.encode('gbk')) ret = self.chessblack.getPos() msg = str(ret[0])+str(',')+str(ret[1]) self.client_socket.send(msg.encode('gbk')) # 2 用户notify self.chessblack.doNotify() # 3 用户wait self.chessblack.doWait() else: print("下棋重复")
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
astonshane/Alien-Spoilers
refs/heads/master
{"/alienspoilers/subs/oauthHelpers.py": ["/alienspoilers/subs/subreddit.py", "/alienspoilers/subs/forms.py", "/alienspoilers/subs/models.py"], "/alienspoilers/subs/renderings.py": ["/alienspoilers/subs/subreddit.py", "/alienspoilers/subs/models.py"], "/alienspoilers/subs/views.py": ["/alienspoilers/subs/subreddit.py", "/alienspoilers/subs/forms.py", "/alienspoilers/subs/models.py"], "/alienspoilers/home/views.py": ["/alienspoilers/subs/forms.py", "/alienspoilers/subs/models.py"], "/alienspoilers/subs/admin.py": ["/alienspoilers/subs/models.py"], "/alienspoilers/subs/forms.py": ["/alienspoilers/subs/models.py"]}
└── ├── alienspoilers │ ├── home │ │ └── views.py │ └── subs │ ├── admin.py │ ├── forms.py │ ├── migrations │ │ └── 0006_auto_20150204_1435.py │ ├── models.py │ ├── oauthHelpers.py │ ├── renderings.py │ ├── subreddit.py │ ├── urls.py │ └── views.py ├── old │ ├── subscribe.py │ └── toggle_sub.py └── test.py
/alienspoilers/home/views.py
from django.shortcuts import render from django.http import HttpResponse, HttpResponseRedirect from subs.forms import UserForm, UserProfileForm from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from uuid import uuid4 from django.contrib.auth.models import User from subs.models import UserProfile from django.utils import timezone import datetime import urllib import ConfigParser import sys import os scriptpath = "subs/" # Add the directory containing your module to the Python path (wants absolute paths) sys.path.append(os.path.abspath(scriptpath)) # Do the import from oauthHelpers import * from renderings import * # Create your views here. def index(request): if request.user.is_authenticated(): #if user hasn't linked their reddit account yet, send them to a page to do that... profile = request.user.profile if profile.reddit_linked: return index_render(request) else: return render(request, 'subs/index.html', {'link_url': make_authorization_url()}) else: user_form = UserForm() profile_form = UserProfileForm() return render(request, 'home/index.html', {'home': True, 'user_form': user_form, 'profile_form': profile_form}) #return HttpResponse("Hello World. Homepage")
/alienspoilers/subs/admin.py
from django.contrib import admin from subs.models import Event from subs.models import UserProfile class EventAdmin(admin.ModelAdmin): list_display = ('title', 'creator', 'subreddit', 'subreddit_fullname', 'start_date', 'end_date', 'pub_date', 'repeat', 'repeat_type', 'was_published_recently') # Register your models here. admin.site.register(Event, EventAdmin) admin.site.register(UserProfile)
/alienspoilers/subs/forms.py
from django import forms from django.contrib.auth.models import User from subs.models import UserProfile, Event import datetime from django.utils import timezone from django.contrib.admin import widgets class UserForm(forms.ModelForm): password = forms.CharField(widget=forms.PasswordInput()) class Meta: model = User fields = ('username', 'email', 'password') class UserProfileForm(forms.ModelForm): class Meta: model = UserProfile fields = () class CreateEventForm(forms.ModelForm): #start_date = forms.DateTimeField(widget=forms.DateTimeInput(), initial=timezone.now()) #end_date = forms.DateTimeField(widget=forms.DateTimeInput(), initial=timezone.now() + datetime.timedelta(days=1)) class Meta: model = Event fields = ['title', 'subreddit']
/alienspoilers/subs/migrations/0006_auto_20150204_1435.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('subs', '0005_auto_20150204_1432'), ] operations = [ migrations.RemoveField( model_name='userprofile', name='reddit_linked', ), migrations.RemoveField( model_name='userprofile', name='reddit_refresh_token', ), ]
/alienspoilers/subs/models.py
import datetime from django.db import models from django.utils import timezone from django.contrib.auth.models import User from django.contrib import admin import uuid class UserProfile(models.Model): # This line is required. Links UserProfile to a User model instance. user = models.OneToOneField(User, related_name="profile") # The additional attributes we wish to include. refresh_token = models.CharField(max_length=200, null=True) access_token = models.CharField(max_length=200, null=True) token_expiry = models.DateTimeField('token expiry', null=True) reddit_linked = models.BooleanField(default=False) # Override the __unicode__() method to return out something meaningful! def __unicode__(self): return self.user.username # Create your models here. class Event(models.Model): creator = models.ForeignKey(User, blank=True, null=True) title = models.CharField(max_length=200) subreddit = models.CharField(max_length=200) subreddit_fullname = models.CharField(max_length=200, null=True) pub_date = models.DateTimeField('date published') start_date = models.DateTimeField('start date') end_date = models.DateTimeField('end date') event_id = models.CharField(max_length=200) finished = models.BooleanField(default=False) repeat = models.BooleanField(default=False) repeat_type = models.CharField(max_length=200, null=True) def __str__(self): return self.title def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1) was_published_recently.admin_order_field = 'pub_date' was_published_recently.boolean = True was_published_recently.short_description = 'Published recently?'
/alienspoilers/subs/oauthHelpers.py
from django.shortcuts import render from django.http import HttpResponse, HttpResponseRedirect from subs.forms import UserForm, UserProfileForm from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from uuid import uuid4 from django.contrib.auth.models import User from subs.models import UserProfile, Event from django.utils import timezone import datetime import urllib import requests import ConfigParser from subreddit import Subreddit #parse the config file to get the oauth client id/secret Config = ConfigParser.ConfigParser() Config.read("config.ini") CLIENT_ID = Config.get("reddit", "oauth_client_id") CLIENT_SECRET = Config.get("reddit", "ouath_client_secret") REDIRECT_URI = Config.get("reddit", "oauth_redirect_uri") def user_agent(): ''' reddit API clients should each have their own, unique user-agent Ideally, with contact info included. ''' return "Alien Spoilers - astonshane@gmail.com - v0.0.2" def base_headers(): return {"User-Agent": user_agent()} def make_authorization_url(): # Generate a random string for the state parameter state = str(uuid4()) print REDIRECT_URI params = {"client_id": CLIENT_ID, "response_type": "code", "state": state, "redirect_uri": REDIRECT_URI, "duration": "permanent", "scope": "identity,mysubreddits,subscribe"} url = "https://ssl.reddit.com/api/v1/authorize?" 
+ urllib.urlencode(params) return url def get_initial_token(request, code): print "getting initial token" client_auth = requests.auth.HTTPBasicAuth(CLIENT_ID, CLIENT_SECRET) post_data = {"grant_type": "authorization_code", "code": code, "redirect_uri": REDIRECT_URI} headers = base_headers() response = requests.post("https://ssl.reddit.com/api/v1/access_token", auth=client_auth, headers=headers, data=post_data) token_json = response.json() #update the UserProfile data model with the new data profile = request.user.profile profile.access_token = token_json["access_token"] profile.refresh_token = token_json["refresh_token"] profile.reddit_linked = True profile.token_expiry = timezone.now() + datetime.timedelta(hours=1) profile.save() return token_json["access_token"] def refresh_token(profile): print "refreshing token" client_auth = requests.auth.HTTPBasicAuth(CLIENT_ID, CLIENT_SECRET) post_data = {"grant_type": "refresh_token", "refresh_token": profile.refresh_token, "redirect_uri": REDIRECT_URI} headers = base_headers() response = requests.post("https://ssl.reddit.com/api/v1/access_token", auth=client_auth, headers=headers, data=post_data) token_json = response.json() #update the UserProfile data model with the new data #profile = request.user.profile profile.access_token = token_json["access_token"] #profile.refresh_token = token_json["refresh_token"] profile.token_expiry = timezone.now() + datetime.timedelta(hours=1) profile.save() #return token_json["access_token"] def get_username(access_token): headers = base_headers() headers.update({"Authorization": "bearer " + access_token}) response = requests.get("https://oauth.reddit.com/api/v1/me", headers=headers) me_json = response.json() #print me_json return me_json['name'] def get_my_subreddits(access_token): headers = base_headers() headers.update({"Authorization": "bearer " + access_token}) response = requests.get("https://oauth.reddit.com/subreddits/mine/subscriber?limit=100", headers=headers) dump = 
response.json() #print my_subreddits data = dump['data'] all_subreddits = data['children'] #dictionary my_subreddits = [] for subreddit in all_subreddits: data = subreddit['data'] fullname = data['name'] url = data['url'] # gets rid of the u' thing name = url.encode('utf-8') #print name, fullname sub = Subreddit(name, fullname) my_subreddits.append(sub) return my_subreddits def unsubscribe(access_token, fullname): headers = base_headers() headers.update({"Authorization": "bearer " + access_token}) headers.update({"Content-Type": "application/json"}) rqst = "https://oauth.reddit.com/api/subscribe?action=unsub&sr=" + fullname #print rqst response = requests.post(rqst, headers=headers) dump = response.json() #print dump def subscribe(access_token, fullname): headers = base_headers() headers.update({"Authorization": "bearer " + access_token}) headers.update({"Content-Type": "application/json"}) rqst = "https://oauth.reddit.com/api/subscribe?action=sub&sr=" + fullname #print rqst response = requests.post(rqst, headers=headers) dump = response.json() #print dump def newRepeatedEvent(event): newEvent = Event() newEvent.creator = event.creator newEvent.title = event.title newEvent.subreddit = event.subreddit newEvent.subreddit_fullname = event.subreddit_fullname newEvent.pub_date = event.pub_date if event.repeat_type == "Weekly": newEvent.start_date = event.start_date + datetime.timedelta(weeks=1) newEvent.end_date = event.end_date + datetime.timedelta(weeks=1) elif event.repeat_type == "Daily": newEvent.start_date = event.start_date + datetime.timedelta(days=1) newEvent.end_date = event.end_date + datetime.timedelta(days=1) newEvent.event_id = uuid4() newEvent.finished = False newEvent.repeat = True newEvent.repeat_type = event.repeat_type newEvent.save() def checkEvents(user): profile = user.profile #refresh the access_token if necessary if(timezone.now() >= profile.token_expiry): refresh_token(profile) access_token = profile.access_token my_subreddits = 
get_my_subreddits(access_token) #get all of the events events = Event.objects.filter(creator = user) #loop through them for event in events: current_time = timezone.now() #if the current time after the start date of this event and # it hasn't been marked as complete... if current_time > event.start_date and not event.finished: found = False fullname = event.subreddit_fullname #search for the subreddit for this event in my subreddits for subreddit in my_subreddits: #print subreddit.name if subreddit.fullname == fullname: found = True break if current_time < event.end_date and found: print "unsubscribing from ", event.subreddit unsubscribe(access_token, fullname) elif current_time > event.end_date and not found: print "subscribing to ", event.subreddit subscribe(access_token, fullname) event.finished = True event.save() #if this was a repeated event, create the next event in the sequence if event.repeat: print "creating new event..." newRepeatedEvent(event)
/alienspoilers/subs/renderings.py
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from uuid import uuid4
from django.contrib.auth.models import User
from subs.models import UserProfile, Event
from django.utils import timezone
import datetime
import urllib
import requests
from subreddit import Subreddit
import sys
import os

scriptpath = "subs/"
# Add the directory containing your module to the Python path (wants absolute paths)
sys.path.append(os.path.abspath(scriptpath))
from oauthHelpers import *


def my_subreddits_render(request):
    """Render the list of the logged-in user's subreddits.

    Refreshes the reddit OAuth token first when it has expired, then pulls
    the username and subscription list via the oauthHelpers module.
    """
    profile = request.user.profile
    # Token refresh must happen before any oauth.reddit.com call.
    if timezone.now() >= profile.token_expiry:
        refresh_token(profile)
    user_name = get_username(profile.access_token)
    my_subreddits = get_my_subreddits(profile.access_token)
    return render(request, 'subs/my_subreddits.html',
                  {'user_name': user_name, 'my_subreddits': my_subreddits})


def index_render(request):
    """Render the event dashboard; a POST deletes the event named in ?id=.

    Events are split into current (end_date still in the future) and past
    for the template.
    """
    user = request.user
    profile = request.user.profile
    if request.method == 'POST':
        # The delete form posts with the event id in the query string.
        event_id = request.GET.get('id', '')
        # BUG FIX: the original did `filter(...)[0].delete()`, which raises
        # IndexError when no row matches (stale id, double-submit).  Use
        # .first() and delete only when a match exists.
        event = Event.objects.filter(creator=user, event_id=event_id).first()
        if event is not None:
            event.delete()
    # Run the subscribe/unsubscribe state machine before displaying.
    checkEvents(user)
    # Get all of the events which the user is a part of...
    events = Event.objects.filter(creator=user)
    current_events = []
    past_events = []
    for event in events:
        if event.end_date > timezone.now():
            current_events.append(event)
        else:
            past_events.append(event)
    return render(request, 'subs/index.html',
                  {'current_events': current_events, 'past_events': past_events})
/alienspoilers/subs/subreddit.py
class Subreddit:
    """Lightweight value object describing one reddit subreddit.

    `name` is the site-relative path (e.g. "/r/python"); `fullname` is
    reddit's thing id (e.g. "t5_2qh0y") required by the subscribe API.
    """

    def __init__(self, name, fullname):
        self.name = name
        self.fullname = fullname
        # Full browsable URL, built from the relative name.
        self.url = "http://www.reddit.com" + name

    def __str__(self):
        # BUG FIX: __str__ must *return* a string.  The original did
        # `print self.name`, which returns None and makes str(sub) raise
        # "TypeError: __str__ returned non-string".
        return self.name
/alienspoilers/subs/urls.py
# URLconf for the "subs" app.  Uses the legacy Django (<=1.7) `patterns()`
# helper; every route maps to a view in subs/views.py by name.
from django.conf.urls import patterns, url
from subs import views

urlpatterns = patterns('',
    # Event dashboard / landing page.
    url(r'^$', views.index, name='index'),
    # Local account registration and session management.
    url(r'^register/$', views.register, name='register'),
    url(r'^login/$', views.user_login, name='login'),
    url(r'^logout/$', views.user_logout, name='logout'),
    # Reddit OAuth redirect target (configured as the app's redirect_uri).
    url(r'^authorize_callback/$', views.user_authorize_callback, name='callback'),
    # Reddit-backed pages.
    url(r'^my_subreddits/$', views.my_subreddits, name='my_subreddits'),
    url(r'^create_event/$', views.create_event, name='create_event'),
    url(r'^link_account/$', views.link_account, name='link_account'),
)
/alienspoilers/subs/views.py
from django.shortcuts import render from django.http import HttpResponse, HttpResponseRedirect from subs.forms import UserForm, UserProfileForm, CreateEventForm from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from uuid import uuid4 from django.contrib.auth.models import User from subs.models import UserProfile, Event from django.utils import timezone import datetime import urllib import ConfigParser from subreddit import Subreddit from oauthHelpers import * from renderings import * import praw #parse the config file to get the oauth client id/secret Config = ConfigParser.ConfigParser() Config.read("config.ini") CLIENT_ID = Config.get("reddit", "oauth_client_id") CLIENT_SECRET = Config.get("reddit", "ouath_client_secret") REDIRECT_URI = Config.get("reddit", "oauth_redirect_uri") @login_required def index(request): profile = request.user.profile if profile.reddit_linked: return index_render(request) else: #if user hasn't linked their reddit account yet, send them to a page to do that... return render(request, 'subs/link_account.html', {'link_url': make_authorization_url()}) @login_required def my_subreddits(request): profile = request.user.profile if profile.reddit_linked: return my_subreddits_render(request) else: #if user hasn't linked their reddit account yet, send them to a page to do that... return render(request, 'subs/link_account.html', {'link_url': make_authorization_url()}) @login_required def link_account(request): profile = request.user.profile if profile.reddit_linked: return index_render(request) else: return index_render(request) def register(request): # A boolean value for telling the template whether the registration was successful. # Set to False initially. Code changes value to True when registration succeeds. registered = False # If it's a HTTP POST, we're interested in processing form data. 
if request.method == 'POST': # Attempt to grab information from the raw form information. # Note that we make use of both UserForm and UserProfileForm. user_form = UserForm(data=request.POST) profile_form = UserProfileForm(data=request.POST) # If the two forms are valid... if user_form.is_valid() and profile_form.is_valid(): # Save the user's form data to the database. user = user_form.save() # Now we hash the password with the set_password method. # Once hashed, we can update the user object. user.set_password(user.password) user.save() # Now sort out the UserProfile instance. # Since we need to set the user attribute ourselves, we set commit=False. # This delays saving the model until we're ready to avoid integrity problems. profile = profile_form.save(commit=False) profile.user = user # Now we save the UserProfile model instance. profile.save() # Update our variable to tell the template registration was successful. registered = True # Invalid form or forms - mistakes or something else? # Print problems to the terminal. # They'll also be shown to the user. else: print user_form.errors, profile_form.errors # Not a HTTP POST, so we render our form using two ModelForm instances. # These forms will be blank, ready for user input. else: user_form = UserForm() profile_form = UserProfileForm() # Render the template depending on the context. return render(request, 'subs/register.html', {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} ) def user_login(request): # If the request is a HTTP POST, try to pull out the relevant information. if request.method == 'POST': # Gather the username and password provided by the user. # This information is obtained from the login form. username = request.POST['username'] password = request.POST['password'] # Use Django's machinery to attempt to see if the username/password # combination is valid - a User object is returned if it is. 
user = authenticate(username=username, password=password) # If we have a User object, the details are correct. # If None (Python's way of representing the absence of a value), no user # with matching credentials was found. if user: # Is the account active? It could have been disabled. if user.is_active: # If the account is valid and active, we can log the user in. # We'll send the user back to the homepage. login(request, user) #run checks upon login to see if the user has any events current events: if user.profile.reddit_linked: checkEvents(user) return HttpResponseRedirect('/subs/') else: # An inactive account was used - no logging in! return HttpResponse("Your AlienSpoilers account is disabled.") else: # Bad login details were provided. So we can't log the user in. print "Invalid login details: {0}, {1}".format(username, password) return render(request,'subs/login.html', {'invalid':True}) #return HttpResponse("Invalid login details supplied.") # The request is not a HTTP POST, so display the login form. # This scenario would most likely be a HTTP GET. else: # No context variables to pass to the template system, hence the # blank dictionary object... return render(request, 'subs/login.html', {'invalid':False}) # Use the login_required() decorator to ensure only those logged in can access the view. @login_required def user_logout(request): # Since we know the user is logged in, we can now just log them out. logout(request) # Take the user back to the homepage. return HttpResponseRedirect('/') @login_required def user_authorize_callback(request): error = request.GET.get('error', '') if error: return "Error: " + error state = request.GET.get('state', '') code = request.GET.get('code') get_initial_token(request, code) return index_render(request) @login_required def create_event(request): # A boolean value for telling the template whether the registration was successful. # Set to False initially. Code changes value to True when registration succeeds. 
created = False # If it's a HTTP POST, we're interested in processing form data. if request.method == 'POST': # Attempt to grab information from the raw form information. event_form = CreateEventForm(data=request.POST) # If the for is valid... # Save the user's form data to the database. event = Event() event.subreddit = request.POST['subreddit'] event.title = request.POST['title'] event.start_date = request.POST['start_date'] event.end_date = request.POST['end_date'] event.creator = request.user event.event_id = uuid4() event.pub_date = timezone.now() repeat = request.POST['choice'] if repeat != "None": event.repeat = True event.repeat_type = repeat #print request.POST #for p in request.POST: # print p, request.POST[p] #startDate = request.POST['datetimepicker6'] #print startDate r = praw.Reddit(user_agent()) sr = event.subreddit.replace("/r/","") #print sr try: #print "subreddit: ####", sr x = r.get_subreddit(sr, fetch=True) event.subreddit_fullname = x.fullname.encode('utf-8') #print event.subreddit_fullname except: #the subreddit lookup failed... # display an error message print "invalid subreddit entered" event_form = CreateEventForm() return render(request, 'subs/create_event.html', {'event_form': event_form, 'created': created, 'invalid': True, 'msg': "Invalid subreddit entered. Try again"}) try: #save the form event.save() # Update our variable to tell the template the event creation was successful. created = True return index_render(request) except: print "ERROR occured while saving event" print sys.exc_info()[0] event_form = CreateEventForm() return render(request, 'subs/create_event.html', {'event_form': event_form, 'created': created, 'invalid': True, 'msg': "Error while saving event. Try Again"}) # Not a HTTP POST, so we render our form using two ModelForm instances. # These forms will be blank, ready for user input. else: event_form = CreateEventForm() # Render the template depending on the context. 
return render(request, 'subs/create_event.html', {'event_form': event_form, 'created': created} )
/old/subscribe.py
# Legacy (pre-OAuth) subscribe helpers using praw's cookie login.
import praw
import sys


def is_subscribed(subreddit):
    """Return True when the logged-in account subscribes to `subreddit`.

    Compares by the subreddit's display name (str(sub)).
    """
    r = praw.Reddit(user_agent = "Alien Spoilers: v0.0.1")
    r.login()
    subs = r.get_my_subreddits()
    for sub in subs:
        if str(sub) == subreddit:
            return True
    return False


def subscribe(subreddit):
    """Best-effort subscribe; failures are reported on stdout, not raised."""
    r = praw.Reddit(user_agent = "Alien Spoilers: v0.0.1")
    try:
        r.login()
        try:
            r.get_subreddit(subreddit).subscribe()
            print "Subscribed to /r/", subreddit
        except:
            # Distinguish "already subscribed" from any other failure.
            if is_subscribed(subreddit):
                print "ERROR: You are already subscribed to", subreddit
            else:
                print "Unexpected error:", sys.exc_info()[0]
                print "Could not subscribe to /r/", subreddit
    except:
        # Login itself failed (bad credentials, network, ...).
        print "Unexpected error: Failed during login login: check username / password"


def unsubscribe(subreddit):
    """Best-effort unsubscribe; mirror image of subscribe()."""
    r = praw.Reddit(user_agent = "Alien Spoilers: v0.0.1")
    try:
        r.login()
        try:
            r.get_subreddit(subreddit).unsubscribe()
            print "Unsubscribed from /r/", subreddit
        except:
            if not is_subscribed(subreddit):
                print "ERROR: You are not subscribed to ", subreddit
            else:
                print "Unexpected error:", sys.exc_info()[0]
                print "Could not unsubscribe from /r/", subreddit
    except:
        print "Unexpected error: Failed during login login: check username / password"
/old/toggle_sub.py
# One-off script: toggle the logged-in account's subscription to /r/python.
import praw
import sys

try:
    r = praw.Reddit(user_agent = "astonshane - test praw")
    r.login()
    subs = r.get_my_subreddits()
    # Scan the subscription list for "python" (case-insensitive).
    found = False
    for sub in subs:
        s = str(sub)
        if s.lower() == "python":
            found = True
    # Flip the subscription state.
    if found:
        r.get_subreddit('python').unsubscribe()
        print "unsubscribing..."
    else:
        r.get_subreddit('python').subscribe()
        print "subscribing..."
except:
    print "Unexpected error:", sys.exc_info()[0]
/test.py
# Scratch script: check whether praw can resolve a subreddit by name.
import praw

s = '/r/sarah'
# Strip the "/r/" prefix; praw expects the bare display name.
s = s.replace("/r/","")
r = praw.Reddit('test by astonshane')
x = ""
try:
    # fetch=True forces an immediate lookup (raises if it doesn't exist).
    x = r.get_subreddit(s, fetch=True)
    print "succeeded"
except:
    print x, "failed"
#fullname = x.fullname.encode('utf-8')
#print fullname
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
gaosui/Biazza
refs/heads/master
{"/backend/main.py": ["/backend/search.py"]}
└── └── backend ├── build_cache.py ├── main.py ├── piazza_api │ └── __init__.py ├── save_model.py ├── search.py └── test.py
/backend/build_cache.py
# Builds cache.json: for every configured Piazza class, fetch all posts,
# flatten their HTML (subject + content + follow-ups) into plain text, and
# store the posts keyed by post number.
from piazza_api import Piazza
from html.parser import HTMLParser
import json


class MyParser(HTMLParser):
    """HTML-to-text stripper that accumulates all text nodes."""

    def __init__(self):
        super().__init__(convert_charrefs=True)
        # Collected text fragments, joined by get_data().
        self.data = []

    def handle_data(self, d):
        self.data.append(d)

    def feed(self, d):
        # Append a dummy element so the parser flushes any trailing,
        # otherwise-unterminated text at the end of the fragment.
        super().feed(d + '<div>&nbsp;</div>')

    def get_data(self):
        return ''.join(self.data)


f = open('settings.json')
settings = json.load(f)
f.close()

p = Piazza()
p.user_login(settings['email'], settings['pwd'])

cache = {}
for cl in settings['classes']:
    print(cl)
    net = p.network(cl)
    ls = {}
    for post in net.iter_all_posts():
        parser = MyParser()
        # Latest revision's subject and body.
        parser.feed(post['history'][0]['subject'])
        parser.feed(post['history'][0]['content'])
        for child in post['children']:
            # Followup entries carry 'subject'; answers carry 'history'.
            if 'subject' in child:
                parser.feed(child['subject'])
                for cchild in child['children']:
                    parser.feed(cchild['subject'])
            else:
                parser.feed(child['history'][0]['content'])
        # Single-line plain text used later by the search index.
        post['content'] = parser.get_data().replace('\n', ' ')
        ls[post['nr']] = post
    cache[cl] = ls

f = open('cache.json', 'w')
json.dump(cache, f)
f.close()
/backend/main.py
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import sys
import urllib.parse as up
from search import predict
from gensim.models import KeyedVectors

# Load the word-embedding model and the pre-built post cache at startup.
model = KeyedVectors.load("vectors.kv", mmap='r')
print('model loaded')
f = open('cache.json')
cache = json.load(f)
f.close()
print('cache loaded')


def buildDict(cid):
    """Return (post texts, post numbers) for class `cid` from the cache."""
    nrs = []
    posts = []
    for k, v in cache[cid].items():
        nrs.append(k)
        posts.append(v['content'])
    return posts, nrs


class Handler(BaseHTTPRequestHandler):
    """GET /?cid=<class>&key=<query> -> JSON list of matching posts."""

    def do_GET(self):
        args = up.parse_qs(self.path.split('?')[1])
        cid = args['cid'][0]
        posts, nrs = buildDict(cid)
        res = predict(posts, nrs, args['key'][0], model)
        objs = []
        for r in res:
            sub = cache[cid][r]['history'][0]['subject']
            content = cache[cid][r]['content']
            # BUG FIX: the original used content.lstrip(sub), but str.lstrip
            # strips any leading run of *characters* present in `sub`, not the
            # prefix string -- it could eat the start of the body.  Strip the
            # subject prefix explicitly instead.
            short = content[len(sub):] if content.startswith(sub) else content
            short = short.lstrip(' ')
            objs.append({
                'nr': r,
                'sub': sub,
                # Short preview for the result list.
                'short': short[0:120],
                # Highlight unanswered posts in the UI.
                'red': 'unanswered' in cache[cid][r]['tags']
            })
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(json.dumps(objs).encode())
        return


print('start server')
HTTPServer(('', 3000), Handler).serve_forever()
/backend/piazza_api/__init__.py
from piazza_api.piazza import Piazza __version__ = "0.8.0"
/backend/save_model.py
# One-off conversion: load the word2vec binary once and re-save it in
# gensim's native KeyedVectors format so main.py can mmap it at startup.
from gensim.test.utils import get_tmpfile
from gensim.models import KeyedVectors

model = KeyedVectors.load_word2vec_format("./GoogleNews-vectors-negative300.bin", binary=True)
print("finish loading start saving")
fname = "vectors.kv"
model.save(fname)
# Sanity check: the saved file can be memory-mapped back.
print("reload")
word_vectors = KeyedVectors.load("vectors.kv", mmap='r')
print("end")
/backend/search.py
import nltk
from nltk.corpus import stopwords
import re
import numpy as np
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from scipy.spatial.distance import cosine
from gensim.models import KeyedVectors
from gensim.test.utils import datapath


def setup():
    """Download the nltk corpora this module depends on (run once)."""
    nltk.download('stopwords')
    nltk.download('punkt')


def tokenize_a_sentence(sentence):
    """Split one sentence into tokens, dropping stopwords and trailing
    non-alphabetic characters (punctuation, digits) from each token."""
    stop_words = set(stopwords.words('english'))
    sentence = sentence.strip()
    line_tokens = re.split(' |_|\n', sentence)
    line_tokens = list(filter(lambda a: a != '', line_tokens))
    filtered = []
    for i in range(len(line_tokens)):
        token = line_tokens[i]
        if token not in stop_words:
            # Trim trailing non-alpha characters ("word)." -> "word").
            for j in reversed(range(len(token))):
                if token[j].isalpha():
                    token = token[:j+1]
                    break
            filtered.append(token)
    return filtered


def tokenize(docs):
    """Tokenize each document; returns [[index, tokens], ...]."""
    all_doc_tokens = []
    index = 0
    for doc in docs:
        # BUG FIX: the original called doc.lower().split(". |, |; |\n"),
        # but str.split treats its argument as a *literal* separator, so
        # documents were essentially never split into sentences.  The
        # pattern is clearly a regex -- use re.split.
        sentences = re.split(r'\. |, |; |\n', doc.lower())
        tokens = []
        for i in range(len(sentences)):
            tokens += tokenize_a_sentence(sentences[i])
        all_doc_tokens.append([index, tokens])
        index += 1
    return all_doc_tokens


def prepare_search(query):
    """Tokenize the user's query the same way documents are tokenized."""
    token_list = tokenize_a_sentence(query)
    return token_list


def predict(piazza_data, ids, query, model):
    """Rank `piazza_data` documents against `query`.

    Builds a tf-idf index, expands the query with embedding-similar and
    substring-matching vocabulary terms, and returns the matching entries
    of `ids` ordered most-similar first.
    """
    list_of_lines = tokenize(piazza_data)
    corpus = []
    for text in list_of_lines:
        corpus.append(' '.join([l.rstrip().lower().translate(
            str.maketrans('', '', string.punctuation)) for l in text[1]]))
    tfidf = TfidfVectorizer(tokenizer=nltk.word_tokenize,
                            stop_words='english', min_df=1, max_df=0.8)
    tfs = tfidf.fit_transform(corpus)
    # Cosine *similarity* from scipy's cosine *distance*.
    similarity = lambda u, v: 1 - cosine(u, v)
    token_list = prepare_search(query)
    similar_keys = {}
    substring_keys = {}
    vocab = tfidf.vocabulary_
    for k in vocab.keys():
        for t in token_list:
            # Vocabulary words that contain a query token as a substring.
            if t in k and t != k:
                if k not in substring_keys:
                    substring_keys[k] = 1
                    #token_list.append(k)
            # Vocabulary words whose embedding is close to a query token.
            if k in model and k not in substring_keys and k not in token_list:
                if t in model and similarity(model[k], model[t]) > 0.6:
                    similar_keys[k] = 1
    similar_list = list(similar_keys.keys())
    search = ' '.join(token_list + similar_list)
    print(search)
    search_tf = tfidf.transform([search])
    cids = []
    sims = []
    # Primary pass: strong matches only.
    for i, sim in enumerate(cosine_similarity(tfs, search_tf)):
        if sim[0] > 0.1:
            cids.append(ids[i])
            sims.append(sim[0])
    # Too few hits: relax the threshold.
    # NOTE(review): this second pass can re-append ids already collected
    # above (anything > 0.1 is also > 0.05) -- confirm whether duplicates
    # in the ranking are acceptable before changing.
    if len(cids) < 2:
        for i, sim in enumerate(cosine_similarity(tfs, search_tf)):
            if sim[0] > 0.05:
                cids.append(ids[i])
                sims.append(sim[0])
    # Also pull in documents matching each substring-expanded term.
    for sub in substring_keys.keys():
        sub_tf = tfidf.transform([sub])
        for i, sim in enumerate(cosine_similarity(tfs, sub_tf)):
            if sim[0] > 0.05 and ids[i] not in cids:
                cids.append(ids[i])
                sims.append(sim[0])
    cids = np.asarray(cids)
    sims = np.asarray(sims)
    # Sort ascending by similarity, then reverse for best-first order.
    return reversed(cids[np.argsort(sims)])
/backend/test.py
# Ad-hoc driver: run the search pipeline against one cached class.
import json
import sys
import search
from gensim.models import KeyedVectors

#model = KeyedVectors.load_word2vec_format("./GoogleNews-vectors-negative300.bin", binary=True)
# mmap the pre-converted vectors (see save_model.py).
model = KeyedVectors.load("vectors.kv", mmap='r')

f = open('cache.json')
cache = json.load(f)
f.close()

#for i in range()
# Hard-coded Piazza class id used for this smoke test.
cids = list(cache['jml6wogpji0o3'].keys())
posts = []
for i in cids:
    posts.append(cache['jml6wogpji0o3'][str(i)]['content'])

search.setup()
results = search.predict(posts, cids, "software", model)
print(results)
#for result in results:
    #print(cache['jml6wogpji0o3'][result]['content'])
    #print()
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
valexandersaulys/flask-ladder
refs/heads/master
{"/app/csrf_protect.py": ["/app/utils.py"]}
└── ├── app │ ├── __init__.py │ ├── accounts.py │ ├── csrf_protect.py │ ├── forms.py │ ├── models.py │ ├── utils.py │ └── views.py ├── config.py ├── fabric_scripts │ ├── fabfile.py │ └── setup_server.py ├── manage.py └── run.py
/app/__init__.py
# Application factory-less Flask setup: creates `app`, `db`, the logger,
# and wires in the views/models modules at import time.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# alternatively, from flask_mongoengine import MongoEngine
import os
from config import BASEDIR

app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
# db = MongoEngine(app)

# User Authentication Dictionary (would be Redis-cache in production)
authhashes = {};

# --------------------- Logging Errors
import logging
from logging.handlers import RotatingFileHandler
# Rotating log file capped at ~100 KB with one backup.
file_handler = RotatingFileHandler("activities.log",maxBytes=100000,backupCount=1);
# 10 == logging.DEBUG.
file_handler.setLevel(10);
app.logger.addHandler(file_handler);
app.logger.setLevel(level=0);
logger = app.logger

# ==================== Start the App !
""" blueprints live just below the app/ folder as subfolders with a similar
layout to the BASEDIR/app/ folder.

>>> from app.simple_page import simple_page
>>> app.register_blueprint(simple_page)

and within BASEDIR/app/simple_page/__init__.py

>>> simple_page = Blueprint('simple_page, __name__, template_folder="templates")

Then add simple_page routing like in the BASEDIR/app/ """

# Imported last, at the bottom, because these modules import `app`/`db`
# from this file (circular-import pattern common in small Flask apps).
# I though below was >>> from app import views, models
import csrf_protect
import views, models
/app/accounts.py
# 'User' Model (for MySQL)
# BUG FIX: `logger` is used below but was never imported from app.
from app import app, db, logger
from werkzeug.security import generate_password_hash, \
    check_password_hash


class User(db.Model):
    """Local account with a salted password hash."""
    # All models should have an 'id'.
    id = db.Column(db.Integer, index=True, primary_key=True)
    # Standard user stuff.
    # NOTE(review): unique=True on `password` looks unintended -- hashes are
    # salted so collisions are unlikely, but the constraint serves no purpose.
    # Left unchanged because altering it requires a schema migration.
    username = db.Column(db.String(128), unique=True)
    password = db.Column(db.String(128), unique=True)

    def set_password(self, password):
        # Store only the salted hash, never the raw password.
        self.password = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password, password)

    def __repr__(self):
        # What gets printed in the console during debugging.
        return "<User %r>" % (self.username)


# login_required decorator
from functools import wraps
# BUG FIX: render_template and session are used by the routes below but
# were never imported, causing NameError at request time.
from flask import g, request, redirect, url_for, render_template, session


def login_required(f):
    """Redirect to the login page unless a user is bound to `g`."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # BUG FIX: flask's `g` is attribute-based, not subscriptable --
        # g["user"] raised TypeError on every request through this decorator.
        if getattr(g, "user", None) is None:
            return redirect(url_for("login_page"))
        return f(*args, **kwargs)
    return decorated_function


# login pages
@app.route("/login_page", methods=["GET"])
def login_page():
    return render_template("login_page.html")


@app.route("/login", methods=["POST"])
def login():
    """Validate credentials from a JSON or form POST and start a session."""
    if request.headers['Content-Type'] == "application/json":
        name_of_user = request.json['username']
        pass_check = request.json['password']
        """ Store & then return some random hash as a key for authentication.
        Then check for it as a login in a RESTful authentication.

        from flask import jsonify
        from app import authhashes
        from app.utils import hash_generator
        hashname = hash_generator();
        authhashes[hashname] = name_of_user;
        return jsonify(authkey=hashname);
        """
    else:
        name_of_user = request.form["username"]
        pass_check = request.form["password"]
    # BUG FIX: the lookup used request.form['username'] directly, which
    # breaks the JSON branch above -- use the value captured either way.
    u = db.session.query(User).\
        filter_by(username=name_of_user).first()
    if u is not None:
        if u.check_password(pass_check):
            g.user = u.username
            # BUG FIX: session['user'] was never populated, so the original
            # log line raised KeyError; log the authenticated username.
            logger.info("User %s Has Logged in" % str(u.username))
            return redirect(url_for("homepage"))
    return render_template("error.html")


@app.route("/logout", methods=['GET', 'POST'])
def logout():
    # Log who is leaving before clearing the binding.
    logger.info("User %s Has Logged Out" % str(getattr(g, "user", None)))
    g.user = None
    return redirect(url_for("homepage"))
/app/csrf_protect.py
# BUG FIX: this module used `request`, `session`, and `abort` without
# importing them from flask, so every POST raised NameError instead of
# being CSRF-checked.
from flask import request, session, abort

from . import app
from .utils import hash_generator


@app.before_request
def csrf_protect():
    """Reject any POST whose form token doesn't match the session token.

    The token is single-use: it is popped from the session on every check.
    """
    if request.method == "POST":
        token = session.pop("_csrf_token", None)
        if not token or token != request.form.get("_csrf_token"):
            abort(403)


def generate_csrf_token():
    """Lazily create (and cache in the session) a 30-char CSRF token."""
    if "_csrf_token" not in session:
        session["_csrf_token"] = hash_generator(30)
    return session["_csrf_token"]


# Exposed to templates as {{ csrf_token() }} for hidden form fields.
app.jinja_env.globals['csrf_token'] = generate_csrf_token
/app/forms.py
# def? class LoginForm(): """ For the login forms """
/app/models.py
from app import db
# NOTE(review): these hash helpers are unused here (they belong to the User
# model in accounts.py) -- kept to avoid breaking any `from models import *`.
from werkzeug.security import generate_password_hash, \
    check_password_hash


class SimpleModel(db.Model):
    """Minimal example model demonstrating the project's conventions."""
    # All models should have an 'id' primary key.
    id = db.Column(db.Integer, index=True, primary_key=True)

    def __repr__(self):
        return "<ID #%r>" % (self.id);
/app/utils.py
# Any utility functions would go here
import string, random
import secrets


def hash_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random token of `size` characters drawn from `chars`.

    These tokens back CSRF protection (csrf_protect.py) and API auth keys
    (accounts.py), i.e. they are security-sensitive -- so draw from the
    OS CSPRNG via `secrets` rather than the predictable `random` module.
    """
    return ''.join(secrets.choice(chars) for _ in range(size))
/app/views.py
import os, datetime
from app import app, db, logger
from flask import render_template, flash, redirect, session, url_for, \
    request, g, send_from_directory


# - - - - - - - Custom Routing
@app.errorhandler(404)
def error_404(e):
    """Render the custom 404 page.

    BUG FIX: Flask passes the error instance to errorhandler callbacks; the
    original zero-argument signature raised TypeError whenever a 404 fired.
    Also return the 404 status explicitly, otherwise the page ships as 200.
    """
    return render_template("404.html"), 404


# - - - - - - - Main Routes
@app.route("/")
@app.route("/index", methods=['GET'])
def main_page():
    """Display main login page."""
    return render_template("index.html")
/config.py
import os

# Session/CSRF signing key.
# NOTE(review): a hard-coded secret should come from the environment in
# production -- confirm deployment practice.
SECRET_KEY = "beautiful_little_world_is_mine"
BASEDIR = os.path.abspath(os.path.dirname(__file__))

# = = = = = = = For the Database Configuration
# Separate out into a 'db_config.py' for larger projects
# NOTE(review): this sets DATABASE_URL while the env var checked is
# DATABASE_URI, and Flask-SQLAlchemy normally reads
# SQLALCHEMY_DATABASE_URI -- verify which key the app actually consumes.
if os.environ.get('DATABASE_URI') is None:
    # Local fallback: SQLite file next to this config.
    DATABASE_URL = 'sqlite:///' + os.path.join(BASEDIR, 'app.db')
    SQLALCHEMY_MIGRATE_REPO = os.path.join(BASEDIR, 'db_repository')
    SQLALCHEMY_TRACK_MODIFICATIONS = True;
else:
    DATABASE_URL = os.environ['DATABASE_URI'];  # could be mongodb

""" Sample MongoDB Setup, if the URL is not specified as an environement variable
MONGODB_DB = 'project1'
MONGODB_HOST = '192.168.1.35'
MONGODB_PORT = 12345
MONGODB_USERNAME = 'webapp'
MONGODB_PASSWORD = 'pwd123'
"""

# - - - - - - - Put Constants here
""" Examples can include bits like constants for folder storage """
/fabric_scripts/fabfile.py
# Python script: run with `$ fab ___command___` # Imports from nearby files from fabric.api import * from setup_server import * # Environmental Stuff env.hosts = [ 'server.domaind.tld', # name or ip address of server ] env.user = 'root' # name of user, you'll have to supply password at execution
/fabric_scripts/setup_server.py
# Fabric provisioning tasks for a fresh Ubuntu deploy box.
from fabric.api import *
import string,random


def update_upgrade():
    """Updates & Upgrades the server"""
    sudo("apt-get update");
    sudo("apt-get -y upgrade");


def install_nginx():
    """Install nginx"""
    sudo("apt-get install nginx");


def install_python():
    """Install python, dev, etc."""
    sudo("apt-get install python python-dev python-pip libssl-dev "\
         "libffi-dev htop munin");


def create_deploy_user():
    """creates a user for deploying a website & copy a git template over"""
    sudo("adduser deploy");
    run("git clone http://github.com/valexandersaulys/flask-ladder");
    run("virtualenv .venv");
    sudo("chown deploy:deploy .venv/");
    sudo("chown deploy:deploy flask-ladder/");
    run(".venv/bin/pip install -r flask-ladder/requirements.txt");


def install_mysql():
    """ Installs and configures MySQL for 'deploy' user """
    sudo("apt-get install mysql-server mysql-client");
    # Random 12-char password for the 'deploy' MySQL account.
    password = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12));
    sudo("mysql --execute=\"CREATE USER 'deploy'@'localhost' IDENTIFIED BY '"+password+"';\"");
    # Run some mysql commands for setup
    sudo("mysql --execute=\"CREATE DATABASE deployment;\"");
    sudo("mysql --execute=\"CREATE DATABASE development;\"");
    sudo("mysql --execute=\"GRANT ALL PRIVILEGES ON deployment.* to 'deploy'@'localhost';\"");
    sudo("mysql --execute=\"GRANT ALL PRIVILEGES ON development.* to 'deploy'@'localhost';\"");
    # Then add the mysql URI to the bashrc for deploy user
    bash_insert_string = "deploy:"+password+"@localhost";  # uri for flask
    # NOTE(review): the literal 'x' after DATABASE_URI= looks like a typo
    # (and the value lacks a mysql:// scheme) -- verify the expected URI
    # format before relying on this.
    sudo("echo 'DATABASE_URI=x"+bash_insert_string+"' >> /home/deploy/.bashrc");


def install_mongodb():
    """ Installs and configures mongodb for 'deploy' user """
    # Install MongoDB
    sudo("apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927");
    run("echo "\
        + "'deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse'"\
        + "| sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list");
    sudo("apt-get update");
    sudo("apt-get install -y mongodb-org");
    # Unfortunately, creating and configuring a user will not be a one liner...
    """ db.createUser({
            username:"",
            password:"",
            roles:[
                { role:"dbAdmin or readWrite", db: "<name_of_database>" },
                ...
            ]
        } );
    db.getUsers();   # to get users
    show dbs         # to show users
    db.updateUser("name_of_user",
        { roles: [ {role: '', db: '' },... ] });
    """


def install_redis():
    """ Installs Redis Run as main user """
    print "Installing some base dependencies"
    sudo("apt-get update");
    sudo("apt-get install -y build-essential tcl8.5");
    print "Download with wget the redis installers for ubuntu";
    run("wget http://download.redis.io/releases/redis-stable.tar.gz");
    run("tar xzf redis-stable.tar.gz");
    print "Download and run make for redis";
    # NOTE(review): each fabric run() opens a fresh shell, so this `cd` does
    # not persist into the following make commands -- they likely execute in
    # the home directory.  Confirm and wrap in `with cd("redis-stable"):`.
    run("cd redis-stable");
    run("make");
    run("make test");
    sudo("make install");
    print "Run the included script to install"
    with cd("utils"):
        sudo("./install_server.sh");
        sudo("service redis_6379 start");
    print "Run setup at startup"
    sudo("update-rc.d redis_6379 defaults");
    print "then setup security so that _only_ the localhost can access"
    sudo("echo 'bind 127.0.0.1' >> /etc/redis/6379.conf");
/manage.py
#!.venv/bin/python """ This can be pretty much kept straight """ from app import db, app from flask_script import Manager from flask_migrate import Migrate, MigrateCommand migrate = Migrate(app, db) manager = Manager(app) manager.add_command('db', MigrateCommand) if __name__=="__main__": manager.run()
/run.py
#!.venv/bin/python from app import app app.run(host='0.0.0.0') # Does __not__ run as a debug! For use with gunicorn
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
dmodena/projetoextensao
refs/heads/main
{"/core/views.py": ["/core/utils.py", "/core/forms.py", "/core/models.py"], "/core/forms.py": ["/core/models.py"]}
└── ├── core │ ├── forms.py │ ├── migrations │ │ └── 0001_initial.py │ ├── models.py │ ├── templatetags │ │ └── custom_filters.py │ ├── urls.py │ ├── utils.py │ └── views.py └── projetoextensao └── settings ├── dev.py └── prod.py
/core/forms.py
from django.forms import ModelForm
from core.models import Edital, Aluno
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm


class EditalForm(ModelForm):
    """ModelForm exposing all editable fields of Edital (course offering)."""
    class Meta:
        model = Edital
        fields = ['titulo', 'descricao', 'inicio_inscricoes', 'fim_inscricoes',
                  'inicio_curso', 'fim_curso', 'vagas', 'pre_requisitos',
                  'edital_link', 'carga_horaria', 'cidade', 'ativo']


class AlunoForm(ModelForm):
    """ModelForm for student (Aluno) registration; created_by is set in the view."""
    class Meta:
        model = Aluno
        fields = ['nome', 'logradouro', 'numero', 'complemento', 'cep',
                  'cidade', 'estado', 'rg', 'cpf', 'email', 'telefone',
                  'nascimento', 'ativo']
/core/migrations/0001_initial.py
# Generated by Django 2.0.1 on 2018-03-15 16:16
# Auto-generated initial schema for Aluno, Edital and Inscrito.
# Do not hand-edit generated migrations; create a new migration instead.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Aluno',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=100)),
                ('logradouro', models.CharField(max_length=100)),
                ('numero', models.CharField(max_length=10)),
                ('complemento', models.TextField(blank=True, default='')),
                ('cep', models.CharField(max_length=10)),
                ('cidade', models.CharField(max_length=100)),
                ('estado', models.CharField(max_length=2)),
                ('rg', models.CharField(max_length=20)),
                ('cpf', models.CharField(max_length=20, unique=True)),
                ('email', models.CharField(max_length=30)),
                ('telefone', models.CharField(max_length=20)),
                ('nascimento', models.DateField()),
                ('ativo', models.BooleanField(default=True)),
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Edital',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=100)),
                ('descricao', models.TextField()),
                ('inicio_inscricoes', models.DateField()),
                ('fim_inscricoes', models.DateField()),
                ('inicio_curso', models.DateField()),
                ('fim_curso', models.DateField()),
                ('vagas', models.IntegerField()),
                ('pre_requisitos', models.TextField()),
                ('edital_link', models.CharField(max_length=100)),
                ('carga_horaria', models.IntegerField(default=1)),
                ('cidade', models.CharField(max_length=100)),
                ('ativo', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Inscrito',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('inscrito_em', models.DateTimeField(default=django.utils.timezone.now)),
                ('matriculado_em', models.DateTimeField(null=True)),
                ('aprovado_em', models.DateTimeField(null=True)),
                ('reprovado_em', models.DateTimeField(null=True)),
                ('status', models.IntegerField(default=0)),
                ('observacoes', models.TextField()),
                ('aluno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Aluno')),
                ('edital', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Edital')),
            ],
        ),
    ]
/core/models.py
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User


class Edital(models.Model):
    """A public call (edital) announcing an extension course offering."""
    titulo = models.CharField(max_length=100)
    descricao = models.TextField()
    inicio_inscricoes = models.DateField()
    fim_inscricoes = models.DateField()
    inicio_curso = models.DateField()
    fim_curso = models.DateField()
    vagas = models.IntegerField()
    pre_requisitos = models.TextField()
    edital_link = models.CharField(max_length=100)
    carga_horaria = models.IntegerField(default=1)
    cidade = models.CharField(max_length=100)
    ativo = models.BooleanField(default=True)

    def __str__(self):
        return self.titulo


class Aluno(models.Model):
    """A student record, owned by the site user who registered it."""
    nome = models.CharField(max_length=100)
    logradouro = models.CharField(max_length=100)
    numero = models.CharField(max_length=10)
    complemento = models.TextField(blank=True, default='')
    cep = models.CharField(max_length=10)
    cidade = models.CharField(max_length=100)
    estado = models.CharField(max_length=2)
    rg = models.CharField(max_length=20)
    cpf = models.CharField(unique=True, max_length=20)
    email = models.CharField(max_length=30)
    telefone = models.CharField(max_length=20)
    nascimento = models.DateField()
    # SET_NULL keeps the student data when the owning user is deleted.
    created_by = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    ativo = models.BooleanField(default=True)

    def __str__(self):
        return self.nome


class Inscrito(models.Model):
    """An Aluno's enrolment in an Edital.

    ``status`` values as written by core.views:
    0 = created (default), 1 = inscrito, 2 = matriculado,
    3 = aprovado, 4 = reprovado.
    """
    inscrito_em = models.DateTimeField(default=timezone.now)
    matriculado_em = models.DateTimeField(null=True)
    aprovado_em = models.DateTimeField(null=True)
    reprovado_em = models.DateTimeField(null=True)
    aluno = models.ForeignKey(Aluno, on_delete=models.CASCADE)
    edital = models.ForeignKey(Edital, on_delete=models.CASCADE)
    status = models.IntegerField(default=0)
    observacoes = models.TextField()

    def __str__(self):
        # Consistency fix: Edital and Aluno define __str__; Inscrito did not,
        # so it rendered as "Inscrito object (n)" in the admin and shells.
        return '%s - %s' % (self.aluno, self.edital)
/core/templatetags/custom_filters.py
from django import template

register = template.Library()

# Portuguese month names, indexed by month-1. Hard-coded so the output does
# not depend on the process locale.
_MESES = (
    "Janeiro", "Fevereiro", "Março", "Abril", "Maio", "Junho",
    "Julho", "Agosto", "Setembro", "Outubro", "Novembro", "Dezembro",
)


@register.filter
def date_mask(value):
    """Format a date/datetime as dd/mm/YYYY."""
    return value.strftime('%d/%m/%Y')


@register.filter
def datefull_mask(value):
    """Format a date/datetime as e.g. '15 de Março de 2018'.

    Idiom fix: the 12-branch if/elif chain over value.month is replaced by a
    tuple lookup — same output for every month.
    """
    mes = _MESES[value.month - 1]
    return value.strftime('%d de ') + mes + value.strftime(' de %Y')


@register.filter
def datetime_mask(value):
    """Format a datetime as dd/mm/YYYY HH:MM."""
    return value.strftime('%d/%m/%Y %H:%M')
/core/urls.py
from django.urls import path
from core import views

# URL routes for the core app.
# NOTE: <id> is the untyped (string) path converter; views resolve it with
# Model.objects.get(id=id), so non-numeric values raise at lookup time.
urlpatterns = [
    # Site root: redirects to the edital listing.
    path('', views.r_editais),
    path('editais/', views.editais, name='editais'),
    path('editais/novo/', views.edital_create, name='editais/novo'),
    path('editais/editar/<id>', views.edital_edit, name='editais/editar'),
    path('editais/excluir/<id>', views.edital_remove, name='editais/excluir'),
    path('alunos/', views.alunos, name='alunos'),
    path('alunos/novo/', views.aluno_create, name='alunos/novo'),
    path('alunos/editar/<id>', views.aluno_edit, name='alunos/editar'),
    path('inscricoes/', views.inscricoes, name='inscricoes'),
    path('inscricoes/edital/<id>', views.inscricoes_edital, name='inscricoes/edital'),
    path('inscricoes/aluno/', views.inscricoes_aluno, name='inscricoes/aluno'),
    # Enrolment lifecycle: create -> matricular -> aprovar/reprovar (or cancelar/excluir).
    path('inscricoes/edital/novo/<id>', views.inscricoes_create, name='inscricao/nova'),
    path('inscricoes/matricular/<id>', views.inscricoes_matricular, name='inscricao/matricular'),
    path('inscricoes/cancelar/<id>', views.inscricoes_cancelar, name='inscricao/cancelar'),
    path('inscricoes/aprovar/<id>', views.inscricoes_aprovar, name='inscricao/aprovar'),
    path('inscricoes/reprovar/<id>', views.inscricoes_reprovar, name='inscricao/reprovar'),
    path('inscricoes/excluir/<id>', views.inscricoes_remove, name='inscricao/excluir'),
    path('certificados/<id>', views.certificado, name='certificado'),
    path('registrar/', views.signup, name='registrar'),
]
/core/utils.py
from django.conf import settings


def static_files_url():
    """Return the configured base URL for static assets.

    Thin accessor over settings.STATIC_FILES_URL so templates can receive it
    as a context variable.
    """
    url = settings.STATIC_FILES_URL
    return url
/core/views.py
from django.shortcuts import render, redirect
from core.models import Edital, Aluno, Inscrito
from core.forms import EditalForm, AlunoForm
from core.utils import static_files_url
from django.utils import timezone
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm


def home(request):
    """Render the static landing page."""
    return render(request, 'core/index.html')


def editais(request, mensagem=None):
    """List every Edital, newest first; *mensagem* is an optional flash text."""
    editais = Edital.objects.all().order_by('-id')
    return render(request, 'core/editais/lista.html',
                  {'editais': editais, 'mensagem': mensagem,
                   'static_url': static_files_url})


def r_editais(request):
    """Site root: redirect to the edital listing."""
    return redirect(editais)


def edital_create(request):
    """Create an Edital; only staff users may save."""
    form = EditalForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid() and request.user.is_staff:
            form.save()
            return redirect(editais)
    return render(request, 'core/editais/novo.html',
                  {'form': form, 'static_url': static_files_url})


def edital_edit(request, id):
    """Edit an existing Edital; only staff users may save."""
    edital = Edital.objects.get(id=id)
    form = EditalForm(request.POST or None, instance=edital)
    if form.is_valid() and request.user.is_staff:
        form.save()
        return redirect(editais)
    return render(request, 'core/editais/novo.html',
                  {'form': form, 'static_url': static_files_url})


def edital_remove(request, id):
    """Delete an Edital (staff only) and return to the listing."""
    edital = Edital.objects.get(id=id)
    if request.user.is_staff:
        edital.delete()
    return redirect(editais)


def alunos(request):
    """List Aluno records: staff see everyone, other users only their own."""
    if request.user.is_authenticated:
        if request.user.is_staff:
            alunos = Aluno.objects.all().order_by('nome')
        else:
            alunos = Aluno.objects.all().filter(created_by=request.user)
        return render(request, 'core/alunos/lista.html',
                      {'alunos': alunos, 'static_url': static_files_url})
    return redirect(editais)


def aluno_create(request):
    """Create an Aluno owned by the logged-in user."""
    form = AlunoForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid() and request.user.is_authenticated:
            # Fix: set created_by before the first save instead of saving twice.
            aluno = form.save(commit=False)
            aluno.created_by = request.user
            aluno.save()
            return redirect(alunos)
    return render(request, 'core/alunos/novo.html',
                  {'form': form, 'static_url': static_files_url})


def aluno_edit(request, id):
    """Edit an existing Aluno; any authenticated user may save."""
    aluno = Aluno.objects.get(id=id)
    form = AlunoForm(request.POST or None, instance=aluno)
    if form.is_valid() and request.user.is_authenticated:
        form.save()
        return redirect(alunos)
    return render(request, 'core/alunos/novo.html',
                  {'form': form, 'static_url': static_files_url})


def inscricoes(request):
    """Entry page for enrolments: list editais to pick from."""
    editais = Edital.objects.all().order_by('titulo')
    return render(request, 'core/inscricoes/lista.html',
                  {'editais': editais, 'static_url': static_files_url})


def inscricoes_edital(request, id):
    """List every enrolment for one Edital, oldest first."""
    edital = Edital.objects.get(id=id)
    inscritos = Inscrito.objects.filter(edital=edital).order_by('inscrito_em')
    return render(request, 'core/inscricoes/edital.html',
                  {'inscritos': inscritos, 'static_url': static_files_url})


def inscricoes_aluno(request):
    """List the logged-in user's own enrolments.

    Fix: .get() raised DoesNotExist (HTTP 500) for users without an Aluno
    record; .first() returns None, which simply yields an empty list.
    """
    aluno = Aluno.objects.filter(created_by=request.user).first()
    inscritos = Inscrito.objects.filter(aluno=aluno).order_by('inscrito_em')
    return render(request, 'core/inscricoes/aluno.html',
                  {'inscritos': inscritos, 'static_url': static_files_url})


def inscricoes_create(request, id):
    """Enrol the logged-in user's Aluno in Edital *id* (status 1)."""
    edital = Edital.objects.get(id=id)
    # Fix: the original used .get(), which raises instead of returning None,
    # so its `aluno == None` guard was dead code. .first() makes it work.
    aluno = Aluno.objects.filter(created_by=request.user).first()
    if aluno is None:
        return redirect(editais)
    if Inscrito.objects.filter(aluno=aluno, edital=edital).exists():
        mensagem = "Aluno já inscrito!"
        return editais(request, mensagem)
    inscrito = Inscrito()
    inscrito.aluno = aluno
    inscrito.edital = edital
    inscrito.status = 1  # 1 = inscrito (awaiting matrícula)
    inscrito.save()
    mensagem = "Inscrição realizada com sucesso!"
    # TODO: e-mail the student a confirmation once an e-mail backend exists.
    return editais(request, mensagem)


def inscricoes_matricular(request, id):
    """Move an enrolment to status 2 (matriculado) and stamp the time."""
    inscrito = Inscrito.objects.get(id=id)
    edital = inscrito.edital
    inscrito.status = 2
    inscrito.matriculado_em = timezone.now()
    inscrito.save()
    # TODO: e-mail the student once an e-mail backend is configured.
    return redirect('inscricoes/edital', id=edital.id)


def inscricoes_cancelar(request, id):
    """Undo a matrícula: back to status 1 and clear the timestamp."""
    inscrito = Inscrito.objects.get(id=id)
    edital = inscrito.edital
    inscrito.status = 1
    inscrito.matriculado_em = None
    inscrito.save()
    return redirect('inscricoes/edital', id=edital.id)


def inscricoes_aprovar(request, id):
    """Move an enrolment to status 3 (aprovado) and stamp the time."""
    inscrito = Inscrito.objects.get(id=id)
    edital = inscrito.edital
    inscrito.status = 3
    inscrito.aprovado_em = timezone.now()
    inscrito.save()
    return redirect('inscricoes/edital', id=edital.id)


def inscricoes_reprovar(request, id):
    """Move an enrolment to status 4 (reprovado) and stamp the time."""
    inscrito = Inscrito.objects.get(id=id)
    edital = inscrito.edital
    inscrito.status = 4
    inscrito.reprovado_em = timezone.now()
    inscrito.save()
    return redirect('inscricoes/edital', id=edital.id)


def inscricoes_remove(request, id):
    """Delete an enrolment and return to the user's enrolment list."""
    inscrito = Inscrito.objects.get(id=id)
    inscrito.delete()
    return redirect('inscricoes/aluno')


def certificado(request, id):
    """Render the completion certificate for one enrolment."""
    inscrito = Inscrito.objects.get(id=id)
    # The related rows are reachable through the FKs; the original re-fetched
    # them by id, which issued two redundant queries for the same rows.
    edital = inscrito.edital
    aluno = inscrito.aluno
    return render(request, 'core/certificados/certificado.html',
                  {'inscrito': inscrito, 'edital': edital, 'aluno': aluno,
                   'static_url': static_files_url})


def signup(request):
    """Register a site user together with their Aluno record, then log in."""
    if request.method == 'POST':
        form_user = UserCreationForm(request.POST)
        form_aluno = AlunoForm(request.POST)
        if form_user.is_valid() and form_aluno.is_valid():
            usuario = form_user.save()
            # Fix: attach the owner before the first save instead of saving twice.
            aluno = form_aluno.save(commit=False)
            aluno.created_by = usuario
            aluno.save()
            username = form_user.cleaned_data.get('username')
            raw_password = form_user.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect(editais)
    else:
        form_user = UserCreationForm()
        form_aluno = AlunoForm()
    return render(request, 'core/signup.html',
                  {'form_user': form_user, 'form_aluno': form_aluno,
                   'static_url': static_files_url})
/projetoextensao/settings/dev.py
from projetoextensao.settings.base import *
from decouple import config

# Development settings: DEBUG on, secrets read from the environment / .env
# file via python-decouple so nothing sensitive is committed.
DEBUG = True

ALLOWED_HOSTS = []

SECRET_KEY = config('SECRET_KEY')

# Base url for static files
STATIC_FILES_URL = config('STATIC_FILES_URL')
/projetoextensao/settings/prod.py
from projetoextensao.settings.base import *
import dj_database_url

# Production settings for the Heroku deployment.
DEBUG = False

ALLOWED_HOSTS = ['projetoextensao.herokuapp.com']

# NOTE(review): `os` is not imported in this file — presumably base.py's
# star import provides it; confirm, otherwise these lines raise NameError.
SECRET_KEY = os.environ['DJANGO_KEY']

# Database credentials come from Heroku's DATABASE_URL environment variable.
DATABASES['default'] = dj_database_url.config()

# Base url for static files
STATIC_FILES_URL = os.environ['STATIC_FILES_URL']
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
pratikaher88/BookStoreProject
refs/heads/master
{"/cart/views.py": ["/coreapp/models.py", "/coreapp/forms.py"], "/coreapp/admin.py": ["/coreapp/models.py"], "/coreapp/forms.py": ["/coreapp/models.py"], "/coreapp/views.py": ["/coreapp/models.py", "/coreapp/forms.py"], "/search/forms.py": ["/coreapp/models.py"], "/search/views.py": ["/coreapp/models.py", "/search/forms.py"], "/transaction/views.py": ["/coreapp/models.py", "/coreapp/forms.py"], "/nofapapp/context_processors.py": ["/coreapp/models.py", "/coreapp/forms.py"], "/transaction/transaction/templatetags/user_books_tag.py": ["/coreapp/models.py"]}
└── ├── cart │ ├── migrations │ │ ├── 0001_initial.py │ │ ├── 0002_auto_20190105_0448.py │ │ └── 0003_auto_20190105_0511.py │ ├── urls.py │ └── views.py ├── coreapp │ ├── admin.py │ ├── forms.py │ ├── migrations │ │ ├── 0003_auto_20181231_1340.py │ │ ├── 0022_auto_20190104_0550.py │ │ ├── 0024_auto_20190104_0743.py │ │ ├── 0025_auto_20190104_0748.py │ │ ├── 0026_order.py │ │ ├── 0027_auto_20190105_0523.py │ │ ├── 0030_auto_20190105_0549.py │ │ ├── 0033_auto_20190105_2116.py │ │ ├── 0035_oldrequests_requests_shippingaddress_transaction.py │ │ ├── 0036_auto_20190111_0747.py │ │ ├── 0042_auto_20190111_1406.py │ │ ├── 0043_auto_20190111_1551.py │ │ ├── 0044_auto_20190112_0402.py │ │ ├── 0047_auto_20190112_0429.py │ │ ├── 0049_remove_shippingaddress_country.py │ │ ├── 0051_auto_20190116_0910.py │ │ ├── 0053_auto_20190116_1608.py │ │ ├── 0054_finalbuyorder.py │ │ ├── 0056_auto_20190117_1332.py │ │ ├── 0057_auto_20190117_1338.py │ │ ├── 0059_auto_20190118_1650.py │ │ ├── 0061_auto_20190119_1345.py │ │ ├── 0062_auto_20190119_1352.py │ │ ├── 0063_auto_20190119_1409.py │ │ ├── 0064_auto_20190119_1412.py │ │ ├── 0065_auto_20190119_1415.py │ │ ├── 0066_finalbuyorder_total_price.py │ │ ├── 0067_auto_20190119_1549.py │ │ ├── 0068_auto_20190120_1419.py │ │ ├── 0069_completedbuyorder_completedtransaction.py │ │ ├── 0070_auto_20190125_0250.py │ │ ├── 0071_auto_20190125_0309.py │ │ ├── 0073_auto_20190125_0324.py │ │ ├── 0074_book_image_url.py │ │ ├── 0075_auto_20190126_1146.py │ │ ├── 0076_auto_20190129_0437.py │ │ ├── 0077_auto_20190129_0438.py │ │ ├── 0078_auto_20190129_1355.py │ │ └── 0081_auto_20190207_1821.py │ ├── models.py │ ├── urls.py │ └── views.py ├── nofapapp │ ├── context_processors.py │ └── urls.py ├── search │ ├── forms.py │ ├── urls.py │ └── views.py └── transaction ├── transaction │ └── templatetags │ └── user_books_tag.py ├── urls.py └── views.py
/cart/migrations/0001_initial.py
# Generated by Django 2.1.4 on 2019-01-04 05:50 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('coreapp', '0022_auto_20190104_0550'), ] operations = [ migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_ordered', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='OrderItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_added', models.DateTimeField(auto_now=True)), ('book', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='coreapp.Book')), ], ), migrations.AddField( model_name='order', name='items', field=models.ManyToManyField(to='cart.OrderItem'), ), migrations.AddField( model_name='order', name='owner', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='coreapp.Profile'), ), ]
/cart/migrations/0002_auto_20190105_0448.py
# Generated by Django 2.1.4 on 2019-01-05 04:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cart', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='orderitem', name='book', ), migrations.AlterField( model_name='order', name='items', field=models.ManyToManyField(to='coreapp.Book'), ), migrations.DeleteModel( name='OrderItem', ), ]
/cart/migrations/0003_auto_20190105_0511.py
# Generated by Django 2.1.4 on 2019-01-05 05:11 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('cart', '0002_auto_20190105_0448'), ] operations = [ migrations.RemoveField( model_name='order', name='items', ), migrations.RemoveField( model_name='order', name='owner', ), migrations.DeleteModel( name='Order', ), ]
/cart/urls.py
from cart import views
from django.urls import path

# Namespace used by reverse lookups elsewhere (e.g. 'cart:cart_items').
app_name = 'cart'

urlpatterns = [
    path('<int:item_id>/add', views.add_to_cart, name='add_to_cart'),
    path('cart/', views.cart_list_entries_view, name='cart_items'),
    path('<int:item_id>/delete', views.delete_from_cart, name='delete_item'),
    # Class-based delete for a placed FinalBuyOrder (owner-only, see the view).
    path('cart/<int:pk>/delete', views.FinalBuyOrderDeleteView.as_view(), name='finalorder-delete'),
]
/cart/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.views import generic
from django.urls import reverse_lazy, reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from coreapp.models import Book, Profile, Order, ShippingAddress, FinalBuyOrder
from coreapp.forms import ShippingAddressForm
from django.db.models import Q
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Sum


@login_required
def add_to_cart(request, item_id):
    """Add Book *item_id* to the user's cart (one Order row per profile)."""
    book = get_object_or_404(Book, id=item_id)
    # get_or_create returns (object, created); the second value was previously
    # bound to the misleading name `status`.
    user_order, _created = Order.objects.get_or_create(owner=request.user.profile)
    if book in user_order.get_cart_items():
        messages.warning(request, 'Item Already in Cart!')
        return redirect(reverse('coreapp:buy_entries'))
    user_order.items.add(book)
    user_order.save()
    messages.info(request, "Item added to cart!")
    return redirect(reverse('coreapp:buy_entries'))


@login_required
def delete_from_cart(request, item_id):
    """Remove Book *item_id* from the user's cart."""
    book = get_object_or_404(Book, id=item_id)
    user_order = Order.objects.get(owner=request.user.profile)
    user_order.items.remove(book)
    messages.info(request, "Item has been deleted")
    return redirect(reverse('cart:cart_items'))


@login_required
def cart_list_entries_view(request):
    """Show the cart; handle 'Yes' (place orders) and 'updateadd' (edit address).

    Fixes:
    - The address form was bound to request.POST even on GET, so the page
      always rendered spurious validation errors; `request.POST or None`
      leaves it unbound on GET.
    - aggregate(Sum(...)) yields None for an empty cart; guard before adding
      the flat shipping fee.
    - The Order row was fetched twice under two names; fetch it once.
    """
    user_order = Order.objects.get(owner=request.user.profile)
    address_form = ShippingAddressForm(
        request.POST or None, instance=request.user.profile.address)
    user_address = get_object_or_404(
        ShippingAddress, profile=request.user.profile)
    orders = user_order.get_cart_items()
    total_price = Order.objects.filter(
        owner=request.user.profile).aggregate(Sum('items__price'))
    total_price = (list(total_price.values())[0])
    if request.method == "POST" and 'Yes' in request.POST:
        for item in orders:
            seller_profile = get_object_or_404(Profile, user=item.user)
            seller_address = get_object_or_404(
                ShippingAddress, profile=seller_profile)
            # +20 is a flat shipping fee added to the cart total.
            FinalBuyOrder.objects.create(user=request.user, book=item,
                                         seller=item.user,
                                         useraddress=user_address,
                                         selleraddress=seller_address,
                                         total_price=(total_price or 0) + 20)
            user_order.items.remove(item)
        messages.success(request, ('Item successfully Ordered!'))
        return redirect('transaction:orders_view')
    if request.method == 'POST' and 'updateadd' in request.POST:
        if address_form.is_valid():
            address_form.save()
            messages.success(request, ('Address successfully updated!'))
            return redirect('cart:cart_items')
    context = {'orders': orders, 'address': user_address,
               'address_form': address_form, 'total_price': total_price}
    return render(request, 'cart_list_entries.html', context)


class FinalBuyOrderDeleteView(LoginRequiredMixin, UserPassesTestMixin,
                              generic.DeleteView):
    """Let a buyer cancel (delete) one of their own FinalBuyOrder rows."""
    model = FinalBuyOrder
    success_url = reverse_lazy('transaction:orders_view')
    template_name = 'final_order_confirm_delete.html'

    def test_func(self):
        # Only the user who placed the order may delete it.
        order = self.get_object()
        if self.request.user == order.user:
            return True
        return False
/coreapp/admin.py
from django.contrib.admin import actions
from django.contrib import admin
from coreapp.models import Book, Profile, UserCollection, Order, Requests, Transaction, ShippingAddress, FinalBuyOrder, OldRequests, CompletedBuyOrder, CompletedTransaction
from import_export.admin import ImportExportModelAdmin
from coreapp.models import delete_transaction_email, send_buyorder_email, send_completed_buy_order_email, delete_buyorder_email
from django.db.models import signals

# Register your models here.
admin.site.register(CompletedBuyOrder)
admin.site.register(CompletedTransaction)


def delete_selected_exchange_order(modeladmin, request, queryset):
    """Admin action: archive completed exchanges, then delete them.

    Each Transaction is copied into CompletedTransaction, both exchanged
    books are deleted, and the transaction row is removed — with the
    pre_delete notification e-mail suppressed for the duration.
    """
    signals.pre_delete.disconnect(
        delete_transaction_email, sender=Transaction)
    try:
        for obj in queryset:
            CompletedTransaction.objects.create(requester=obj.requester, offerrer=obj.offerrer, requester_book_name=obj.requester_book.book_name, offerrer_book_name=obj.offerrer_book.book_name,
                                                requester_author_name=obj.requester_book.author_name, offerrer_author_name=obj.offerrer_book.author_name, requester_address=obj.requester_address, offerrer_address=obj.offerrer_address)
            obj.requester_book.delete()
            obj.offerrer_book.delete()
            obj.delete()
    finally:
        # Fix: reconnect even when archiving raises; otherwise the e-mail
        # signal stayed disconnected for the rest of the process.
        signals.pre_delete.connect(
            delete_transaction_email, sender=Transaction)


delete_selected_exchange_order.short_description = "Delete Exchange Order on Completion (Select This)"


def delete_selected_exchange_order_without_notifications(modeladmin, request, queryset):
    """Admin action: delete exchanges without sending notification e-mails."""
    signals.pre_delete.disconnect(
        delete_transaction_email, sender=Transaction)
    try:
        for obj in queryset:
            obj.delete()
    finally:
        # Fix: always restore the signal, even if a delete raises.
        signals.pre_delete.connect(
            delete_transaction_email, sender=Transaction)


delete_selected_exchange_order_without_notifications.short_description = "Delete Exchange Order without Notifications"


class TransactionAdmin(admin.ModelAdmin):
    actions = [delete_selected_exchange_order,
               delete_selected_exchange_order_without_notifications]


admin.site.register(Transaction, TransactionAdmin)


def delete_selected_buy_order(modeladmin, request, queryset):
    """Admin action: archive completed buy orders, then delete them."""
    signals.pre_delete.disconnect(
        delete_buyorder_email, sender=FinalBuyOrder)
    try:
        for obj in queryset:
            CompletedBuyOrder.objects.create(user=obj.user, book_name=obj.book.book_name, author_name=obj.book.author_name,
                                             seller=obj.seller, useraddress=obj.useraddress, selleraddress=obj.selleraddress, total_price=obj.total_price)
            obj.book.delete()
            obj.delete()
    finally:
        # Fix: always restore the signal, even if archiving raises.
        signals.pre_delete.connect(
            delete_buyorder_email, sender=FinalBuyOrder)


delete_selected_buy_order.short_description = "Delete Selected Buy Order on Completion (Select This)"


def delete_selected_buy_order_without_notifications(modeladmin, request, queryset):
    """Admin action: delete buy orders without sending notification e-mails."""
    signals.pre_delete.disconnect(
        delete_buyorder_email, sender=FinalBuyOrder)
    try:
        for obj in queryset:
            obj.delete()
    finally:
        # Fix: always restore the signal, even if a delete raises.
        signals.pre_delete.connect(
            delete_buyorder_email, sender=FinalBuyOrder)


delete_selected_buy_order_without_notifications.short_description = "Delete Selected without sending notifications"


class FinalBuyOrderAdmin(admin.ModelAdmin):
    actions = [delete_selected_buy_order,
               delete_selected_buy_order_without_notifications]


admin.site.register(FinalBuyOrder, FinalBuyOrderAdmin)
admin.site.register(Book)
admin.site.register(Profile)
admin.site.register(Order)
admin.site.register(OldRequests)
admin.site.register(UserCollection)
admin.site.register(Requests)
admin.site.register(ShippingAddress)
/coreapp/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from coreapp.models import Book, Profile, ShippingAddress
from dal import autocomplete


class UserCreationForm(UserCreationForm):
    """Sign-up form that additionally requires an e-mail address.

    NOTE(review): intentionally shadows the imported
    django.contrib.auth.forms.UserCreationForm that it subclasses;
    renaming it would break existing imports of this form.
    """
    email = forms.EmailField(required=True, label='Email')

    class Meta:
        model = User
        fields = ("username", "email", "password1", "password2")

    def save(self, commit=True):
        """Persist the new user, copying the validated e-mail onto it."""
        user = super(UserCreationForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        if commit:
            user.save()
        return user


class NewEntryForm(forms.ModelForm):
    """Form for listing a Book for sale or exchange."""

    class Meta:
        model = Book
        fields = ['book_name', 'author_name', 'description',
                  'sell_or_exchange', 'price', 'condition', 'image_url']
        widgets = {
            'description': forms.Textarea(attrs={'rows': 4, 'cols': 15}),
        }

    def __init__(self, *args, **kwargs):
        super(NewEntryForm, self).__init__(*args, **kwargs)
        # Render price as an HTML number input so the browser enforces
        # numeric entry; the id is used by front-end scripts.
        self.fields['price'].widget = forms.TextInput(attrs={
            'type': 'number',
            'id': 'priceid',
        })


class ShippingAddressForm(forms.ModelForm):
    """Shipping address form; the owning profile is set by the view
    (hence excluded here)."""

    # BUG FIX: field_order is a Form-level attribute, not a Meta option.
    # Inside Meta it was silently ignored and the fields rendered in
    # model-declaration order instead of this order.
    field_order = ['flatnumber', 'address1', 'address2', 'zip_code',
                   'city', 'phone_number']

    class Meta:
        model = ShippingAddress
        fields = '__all__'
        exclude = ('profile',)


class UserForm(forms.ModelForm):
    """Edit the basic account fields of a User."""

    class Meta:
        model = User
        fields = ['username', 'email']


class ProfileForm(forms.ModelForm):
    """Edit the profile picture of a Profile."""

    class Meta:
        model = Profile
        fields = ['profile_pic', ]
/coreapp/migrations/0003_auto_20181231_1340.py
# Generated by Django 2.1.4 on 2018-12-31 13:40
# Adds Book.condition and alters Book.price.

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('coreapp', '0002_book_price'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='condition',
            field=models.CharField(choices=[('Acceptable', 'Acceptable'), ('Bad', 'Bad'), ('Good', 'Good')], default='Acceptable', max_length=100),
        ),
        migrations.AlterField(
            model_name='book',
            name='price',
            # BUG FIX: max_length is not a valid option for IntegerField
            # (Django ignores it and its system checks flag it); migration
            # 0053 already re-declares price without it, so dropping the
            # bogus kwarg changes no schema.
            field=models.IntegerField(),
        ),
    ]
/coreapp/migrations/0022_auto_20190104_0550.py
# Generated by Django 2.1.4 on 2019-01-04 05:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0021_auto_20190104_0332'), ] operations = [ migrations.RemoveField( model_name='order', name='items', ), migrations.RemoveField( model_name='order', name='owner', ), migrations.RemoveField( model_name='orderitem', name='book', ), migrations.AlterField( model_name='profile', name='profile_pic', field=models.ImageField(default='default.png', upload_to='profile_images/'), ), migrations.DeleteModel( name='Order', ), migrations.DeleteModel( name='OrderItem', ), ]
/coreapp/migrations/0024_auto_20190104_0743.py
# Generated by Django 2.1.4 on 2019-01-04 07:43 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0023_usercollection'), ] operations = [ migrations.CreateModel( name='UserCollectionItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_added', models.DateTimeField(auto_now=True)), ('book', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='coreapp.Book')), ], ), migrations.AlterField( model_name='usercollection', name='items', field=models.ManyToManyField(to='coreapp.UserCollectionItem'), ), ]
/coreapp/migrations/0025_auto_20190104_0748.py
# Generated by Django 2.1.4 on 2019-01-04 07:48 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0024_auto_20190104_0743'), ] operations = [ migrations.RenameField( model_name='usercollection', old_name='user', new_name='owner', ), ]
/coreapp/migrations/0026_order.py
# Generated by Django 2.1.4 on 2019-01-05 05:11 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0025_auto_20190104_0748'), ] operations = [ migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date_ordered', models.DateTimeField(auto_now=True)), ('items', models.ManyToManyField(to='coreapp.Book')), ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='coreapp.Profile')), ], ), ]
/coreapp/migrations/0027_auto_20190105_0523.py
# Generated by Django 2.1.4 on 2019-01-05 05:23 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0026_order'), ] operations = [ migrations.RemoveField( model_name='order', name='items', ), migrations.AddField( model_name='order', name='items', field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='coreapp.Book'), ), ]
/coreapp/migrations/0030_auto_20190105_0549.py
# Generated by Django 2.1.4 on 2019-01-05 05:49 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0029_auto_20190105_0530'), ] operations = [ migrations.RemoveField( model_name='usercollectionitem', name='book', ), migrations.RemoveField( model_name='usercollection', name='items', ), migrations.AddField( model_name='usercollection', name='books', field=models.ManyToManyField(to='coreapp.Book'), ), migrations.AddField( model_name='usercollection', name='date_ordered', field=models.DateTimeField(auto_now=True), ), migrations.AlterField( model_name='order', name='owner', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='coreapp.Profile'), ), migrations.AlterField( model_name='usercollection', name='owner', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='coreapp.Profile'), ), migrations.DeleteModel( name='UserCollectionItem', ), ]
/coreapp/migrations/0033_auto_20190105_2116.py
# Generated by Django 2.1.4 on 2019-01-05 21:16 import coreapp.models from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0032_auto_20190105_0613'), ] operations = [ migrations.AlterField( model_name='book', name='image', field=models.ImageField(blank=True, default=coreapp.models.random_img, null=True, upload_to='book_images/'), ), ]
/coreapp/migrations/0035_oldrequests_requests_shippingaddress_transaction.py
# Generated by Django 2.1.4 on 2019-01-08 15:24
# Auto-generated schema migration -- do not edit by hand.
# Creates the exchange-flow models: OldRequests, Requests, ShippingAddress
# and Transaction.  NOTE: 'offerer_book' here is a historical typo that
# migration 0036 renames to 'offerrer_book'; do not "fix" it in place.

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('coreapp', '0034_auto_20190105_2123'),
    ]

    operations = [
        migrations.CreateModel(
            name='OldRequests',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('offerrer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='old_from_user', to=settings.AUTH_USER_MODEL)),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='old_to_user', to=settings.AUTH_USER_MODEL)),
                ('requester_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='old_requester_book_from_user', to='coreapp.Book')),
            ],
        ),
        migrations.CreateModel(
            name='Requests',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('offerrer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_user', to=settings.AUTH_USER_MODEL)),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_user', to=settings.AUTH_USER_MODEL)),
                ('requester_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requester_book_from_user', to='coreapp.Book')),
            ],
        ),
        migrations.CreateModel(
            name='ShippingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('flatnumber', models.CharField(max_length=100, verbose_name='Flat Number')),
                ('address1', models.CharField(max_length=200, verbose_name='Address line 1')),
                ('address2', models.CharField(max_length=200, verbose_name='Address line 2')),
                ('zip_code', models.CharField(choices=[('421202', '421202'), ('421201', '421201'), ('421203', '421203')], default='421201', max_length=100)),
                ('city', models.CharField(max_length=100, verbose_name='City')),
                ('country', models.CharField(max_length=100, verbose_name='Country')),
            ],
            options={
                'verbose_name': 'Shipping Address',
                'verbose_name_plural': 'Shipping Addresses',
            },
        ),
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('offerer_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offerrer_book_from_user', to='coreapp.Book')),
                ('offerrer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offerrer', to=settings.AUTH_USER_MODEL)),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requester', to=settings.AUTH_USER_MODEL)),
                ('requester_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requested_book_from_user', to='coreapp.Book')),
            ],
        ),
    ]
/coreapp/migrations/0036_auto_20190111_0747.py
# Generated by Django 2.1.4 on 2019-01-11 07:47 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0035_oldrequests_requests_shippingaddress_transaction'), ] operations = [ migrations.RenameField( model_name='transaction', old_name='offerer_book', new_name='offerrer_book', ), ]
/coreapp/migrations/0042_auto_20190111_1406.py
# Generated by Django 2.1.4 on 2019-01-11 14:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0041_auto_20190111_1357'), ] operations = [ migrations.AlterField( model_name='shippingaddress', name='address1', field=models.CharField(max_length=500, verbose_name='Address line 1'), ), migrations.AlterField( model_name='shippingaddress', name='address2', field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Address line 2'), ), ]
/coreapp/migrations/0043_auto_20190111_1551.py
# Generated by Django 2.1.4 on 2019-01-11 15:51 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0042_auto_20190111_1406'), ] operations = [ migrations.AlterField( model_name='shippingaddress', name='profile', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='address', to='coreapp.Profile'), ), ]
/coreapp/migrations/0044_auto_20190112_0402.py
# Generated by Django 2.1.4 on 2019-01-12 04:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0043_auto_20190111_1551'), ] operations = [ migrations.AlterField( model_name='shippingaddress', name='zip_code', field=models.CharField(choices=[('421202', '421202'), ('421201', '421201'), ('421203', '421203')], max_length=100), ), ]
/coreapp/migrations/0047_auto_20190112_0429.py
# Generated by Django 2.1.4 on 2019-01-12 04:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0046_shippingaddress_phone_number'), ] operations = [ migrations.AlterField( model_name='shippingaddress', name='country', field=models.CharField(default='India', max_length=100, verbose_name='Country'), ), ]
/coreapp/migrations/0049_remove_shippingaddress_country.py
# Generated by Django 2.1.4 on 2019-01-12 04:38 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0048_auto_20190112_0433'), ] operations = [ migrations.RemoveField( model_name='shippingaddress', name='country', ), ]
/coreapp/migrations/0051_auto_20190116_0910.py
# Generated by Django 2.1.4 on 2019-01-16 09:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0050_book_buy_or_exchange'), ] operations = [ migrations.RemoveField( model_name='book', name='buy_or_exchange', ), migrations.AddField( model_name='book', name='sell_or_exchange', field=models.CharField(choices=[('Sell', 'Sell'), ('Exchange', 'Exchange')], default='Exchange', max_length=100), ), ]
/coreapp/migrations/0053_auto_20190116_1608.py
# Generated by Django 2.1.4 on 2019-01-16 16:08 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0052_auto_20190116_1519'), ] operations = [ migrations.AlterField( model_name='book', name='price', field=models.IntegerField(blank=True, null=True), ), ]
/coreapp/migrations/0054_finalbuyorder.py
# Generated by Django 2.1.4 on 2019-01-17 03:32 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('coreapp', '0053_auto_20190116_1608'), ] operations = [ migrations.CreateModel( name='FinalBuyOrder', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('Book', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='coreapp.Book')), ('seller', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL)), ('selleraddres', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='selleraddress', to='coreapp.ShippingAddress')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)), ('useraddress', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='address', to='coreapp.ShippingAddress')), ], ), ]
/coreapp/migrations/0056_auto_20190117_1332.py
# Generated by Django 2.1.4 on 2019-01-17 13:32 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0055_auto_20190117_0425'), ] operations = [ migrations.RenameField( model_name='finalbuyorder', old_name='selleraddres', new_name='selleraddress', ), migrations.AddField( model_name='finalbuyorder', name='date_ordered', field=models.DateTimeField(auto_now=True), ), ]
/coreapp/migrations/0057_auto_20190117_1338.py
# Generated by Django 2.1.4 on 2019-01-17 13:38 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0056_auto_20190117_1332'), ] operations = [ migrations.RenameField( model_name='finalbuyorder', old_name='Book', new_name='book', ), ]
/coreapp/migrations/0059_auto_20190118_1650.py
# Generated by Django 2.1.4 on 2019-01-18 16:50 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0058_auto_20190118_1647'), ] operations = [ migrations.AlterField( model_name='finalbuyorder', name='book', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coreapp.Book'), ), migrations.AlterField( model_name='finalbuyorder', name='seller', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seller', to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='finalbuyorder', name='selleraddress', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='selleraddress', to='coreapp.ShippingAddress'), ), migrations.AlterField( model_name='finalbuyorder', name='useraddress', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='address', to='coreapp.ShippingAddress'), ), ]
/coreapp/migrations/0061_auto_20190119_1345.py
# Generated by Django 2.1.4 on 2019-01-19 13:45 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('coreapp', '0060_auto_20190118_1651'), ] operations = [ migrations.AddField( model_name='transaction', name='selleraddress', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller_address', to='coreapp.ShippingAddress'), ), migrations.AddField( model_name='transaction', name='useraddress', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_address', to='coreapp.ShippingAddress'), ), migrations.AlterField( model_name='shippingaddress', name='zip_code', field=models.CharField(choices=[('421202', '421202'), ('421201', '421201'), ('421204', '421204'), ('421203', '421203'), ('421301', '421301')], default='421202', max_length=100), ), ]
/coreapp/migrations/0062_auto_20190119_1352.py
# Generated by Django 2.1.4 on 2019-01-19 13:52 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0061_auto_20190119_1345'), ] operations = [ migrations.RenameField( model_name='transaction', old_name='selleraddress', new_name='offerrer_address', ), migrations.RenameField( model_name='transaction', old_name='useraddress', new_name='requester_address', ), ]
/coreapp/migrations/0063_auto_20190119_1409.py
# Generated by Django 2.1.4 on 2019-01-19 14:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0062_auto_20190119_1352'), ] operations = [ migrations.AlterField( model_name='shippingaddress', name='zip_code', field=models.CharField(choices=[('421202', '421202'), ('421201', '421201'), ('421204', '421204'), ('421203', '421203'), ('421301', '421301')], default='421202', help_text='we only operate in these locations for now!', max_length=100), ), ]
/coreapp/migrations/0064_auto_20190119_1412.py
# Generated by Django 2.1.4 on 2019-01-19 14:12 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0063_auto_20190119_1409'), ] operations = [ migrations.AlterField( model_name='book', name='sell_or_exchange', field=models.CharField(choices=[('Sell', 'Sell'), ('Exchange', 'Exchange')], default='Exchange', help_text='By adding items to exchange you can make requests to ther users for exchange!', max_length=100), ), migrations.AlterField( model_name='shippingaddress', name='zip_code', field=models.CharField(choices=[('421202', '421202'), ('421201', '421201'), ('421204', '421204'), ('421203', '421203'), ('421301', '421301')], default='421202', help_text='We only operate in these locations for now!', max_length=100), ), ]
/coreapp/migrations/0065_auto_20190119_1415.py
# Generated by Django 2.1.4 on 2019-01-19 14:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0064_auto_20190119_1412'), ] operations = [ migrations.AlterField( model_name='book', name='book_name', field=models.CharField(help_text='We only deal with original books with ISBN codes, pirated books will not be accepted.', max_length=100), ), migrations.AlterField( model_name='book', name='sell_or_exchange', field=models.CharField(choices=[('Sell', 'Sell'), ('Exchange', 'Exchange')], default='Exchange', help_text='By adding items to exchange you can make requests to other users for exchange.', max_length=100), ), ]
/coreapp/migrations/0066_finalbuyorder_total_price.py
# Generated by Django 2.1.4 on 2019-01-19 14:31 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0065_auto_20190119_1415'), ] operations = [ migrations.AddField( model_name='finalbuyorder', name='total_price', field=models.IntegerField(null=True), ), ]
/coreapp/migrations/0067_auto_20190119_1549.py
# Generated by Django 2.1.4 on 2019-01-19 15:49 import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0066_finalbuyorder_total_price'), ] operations = [ migrations.AlterField( model_name='book', name='image', field=models.ImageField(blank=True, null=True, upload_to='book_images/', verbose_name='Book Image'), ), migrations.AlterField( model_name='shippingaddress', name='phone_number', field=models.CharField(max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+9999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{10,15}$')]), ), ]
/coreapp/migrations/0068_auto_20190120_1419.py
# Generated by Django 2.1.4 on 2019-01-20 14:19 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0067_auto_20190119_1549'), ] operations = [ migrations.AlterField( model_name='book', name='price', field=models.IntegerField(blank=True, help_text='We would be deducting 20 rupees from item price for delivery purposes.', null=True), ), ]
/coreapp/migrations/0069_completedbuyorder_completedtransaction.py
# Generated by Django 2.1.4 on 2019-01-22 12:38
# Auto-generated schema migration -- do not edit by hand.
# Creates the archival models CompletedBuyOrder and CompletedTransaction.
# (Migrations 0070-0073 later replace their FK book references with plain
# text snapshot fields.)

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('coreapp', '0068_auto_20190120_1419'),
    ]

    operations = [
        migrations.CreateModel(
            name='CompletedBuyOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_ordered', models.DateTimeField(auto_now_add=True)),
                ('total_price', models.IntegerField(null=True)),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coreapp.Book')),
                ('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_seller', to=settings.AUTH_USER_MODEL)),
                ('selleraddress', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_selleraddress', to='coreapp.ShippingAddress')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_user', to=settings.AUTH_USER_MODEL)),
                ('useraddress', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_address', to='coreapp.ShippingAddress')),
            ],
        ),
        migrations.CreateModel(
            name='CompletedTransaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('offerrer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_offerrer', to=settings.AUTH_USER_MODEL)),
                ('offerrer_address', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='completed_seller_address', to='coreapp.ShippingAddress')),
                ('offerrer_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_offerrer_book_from_user', to='coreapp.Book')),
                ('requester', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_requester', to=settings.AUTH_USER_MODEL)),
                ('requester_address', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='completed_user_address', to='coreapp.ShippingAddress')),
                ('requester_book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='completed_requested_book_from_user', to='coreapp.Book')),
            ],
        ),
    ]
/coreapp/migrations/0070_auto_20190125_0250.py
# Generated by Django 2.1.4 on 2019-01-25 02:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0069_completedbuyorder_completedtransaction'), ] operations = [ migrations.RemoveField( model_name='completedbuyorder', name='book', ), migrations.RemoveField( model_name='completedbuyorder', name='date_ordered', ), migrations.RemoveField( model_name='completedbuyorder', name='seller', ), migrations.RemoveField( model_name='completedbuyorder', name='selleraddress', ), migrations.RemoveField( model_name='completedbuyorder', name='total_price', ), migrations.RemoveField( model_name='completedbuyorder', name='user', ), migrations.RemoveField( model_name='completedbuyorder', name='useraddress', ), migrations.AddField( model_name='completedbuyorder', name='book_name', field=models.CharField(blank=True, max_length=100, null=True), ), ]
/coreapp/migrations/0071_auto_20190125_0309.py
# Generated by Django 2.1.4 on 2019-01-25 03:09 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('coreapp', '0070_auto_20190125_0250'), ] operations = [ migrations.AddField( model_name='completedbuyorder', name='author_name', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AddField( model_name='completedbuyorder', name='date_ordered', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='completedbuyorder', name='seller', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='completed_seller', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='completedbuyorder', name='selleraddress', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='completed_selleraddress', to='coreapp.ShippingAddress'), ), migrations.AddField( model_name='completedbuyorder', name='total_price', field=models.IntegerField(null=True), ), migrations.AddField( model_name='completedbuyorder', name='useraddress', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='completed_address', to='coreapp.ShippingAddress'), ), ]
/coreapp/migrations/0073_auto_20190125_0324.py
# Generated by Django 2.1.4 on 2019-01-25 03:24 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0072_completedbuyorder_user'), ] operations = [ migrations.RemoveField( model_name='completedtransaction', name='offerrer_book', ), migrations.RemoveField( model_name='completedtransaction', name='requester_book', ), migrations.AddField( model_name='completedtransaction', name='offerrer_author_name', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AddField( model_name='completedtransaction', name='offerrer_book_name', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AddField( model_name='completedtransaction', name='requester_author_name', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AddField( model_name='completedtransaction', name='requester_book_name', field=models.CharField(blank=True, max_length=100, null=True), ), ]
/coreapp/migrations/0074_book_image_url.py
# Generated by Django 2.1.4 on 2019-01-26 11:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0073_auto_20190125_0324'), ] operations = [ migrations.AddField( model_name='book', name='image_url', field=models.CharField(blank=True, max_length=200, null=True), ), ]
/coreapp/migrations/0075_auto_20190126_1146.py
# Generated by Django 2.1.4 on 2019-01-26 11:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0074_book_image_url'), ] operations = [ migrations.AlterField( model_name='book', name='image_url', field=models.CharField(blank=True, max_length=500, null=True), ), ]
/coreapp/migrations/0076_auto_20190129_0437.py
# Generated by Django 2.1.4 on 2019-01-29 04:37 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0075_auto_20190126_1146'), ] operations = [ migrations.AlterModelOptions( name='requests', options={'verbose_name_plural': 'Requests'}, ), migrations.RemoveField( model_name='book', name='image', ), ]
/coreapp/migrations/0077_auto_20190129_0438.py
# Generated by Django 2.1.4 on 2019-01-29 04:38 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('coreapp', '0076_auto_20190129_0437'), ] operations = [ migrations.AlterModelOptions( name='oldrequests', options={'verbose_name_plural': 'Old Requests'}, ), ]
/coreapp/migrations/0078_auto_20190129_1355.py
# Generated by Django 2.1.4 on 2019-01-29 13:55 import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0077_auto_20190129_0438'), ] operations = [ migrations.AlterField( model_name='book', name='condition', field=models.CharField(choices=[('Almost New', 'Almost New'), ('Acceptable', 'Acceptable'), ('Good', 'Good'), ('Bad', 'Bad')], default='Acceptable', max_length=100), ), migrations.AlterField( model_name='shippingaddress', name='phone_number', field=models.CharField(help_text='Enter your 10 digit phone number', max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+9999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{10,15}$')]), ), ]
/coreapp/migrations/0081_auto_20190207_1821.py
# Generated by Django 2.1.4 on 2019-02-07 18:21 import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coreapp', '0080_auto_20190130_0336'), ] operations = [ migrations.AlterField( model_name='shippingaddress', name='phone_number', field=models.CharField(help_text='Enter your 10 digit phone number without any prefix code.', max_length=17, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+9999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{10,15}$')]), ), ]
/coreapp/models.py
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save, post_delete, pre_delete
from django.dispatch import receiver
from PIL import Image
from random import choice
from os.path import join as path_join
from os import listdir
import os
import requests
from os.path import isfile
from nofapapp.settings import BASE_DIR, MAILGUN_KEY, MAILGUN_REQUEST_URL, MSG_SMS_URL, MSG_SMS_AUTH_KEY
from django.core.validators import RegexValidator
from django.core.mail import EmailMessage
from django.core.mail import send_mail
import threading

# NOTE: "CONITION" is a long-standing typo for "CONDITION"; the name is kept
# because it is referenced by Book.condition and baked into migrations.
CONITION_CHOICES = (
    ('Almost New', 'Almost New'),
    ('Acceptable', 'Acceptable'),
    ('Good', 'Good'),
    ('Bad', 'Bad'),
)

# Pin codes the service currently delivers to.
ZIP_CHOICES = (
    ('421202', '421202'),
    ('421201', '421201'),
    ('421204', '421204'),
    ('421203', '421203'),
    ('421301', '421301'),
)

BUY_OR_EXCHANGE = (
    ('Sell', 'Sell'),
    ('Exchange', 'Exchange'),
)


def random_img():
    """Return the filename of a random default profile picture.

    Picks one regular file from static/profile_pic under BASE_DIR; used as
    the default for Profile.profile_pic.
    """
    dir_path = os.path.join(BASE_DIR, 'static/profile_pic')
    files = [content for content in listdir(dir_path)
             if isfile(path_join(dir_path, content))]
    return str(choice(files))


class Book(models.Model):
    """A book listed by a user, either for sale or for exchange."""

    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    book_name = models.CharField(
        max_length=100,
        help_text="We only deal with original books with ISBN codes, pirated books will not be accepted.")
    author_name = models.CharField(max_length=100, blank=True, null=True)
    description = models.CharField(max_length=2000, blank=True, null=True)
    # price is only meaningful for Sell listings; views enforce that it is
    # non-blank when sell_or_exchange == 'Sell'.
    price = models.IntegerField(
        null=True, blank=True,
        help_text="We would be deducting 20 rupees from item price for delivery purposes.")
    sell_or_exchange = models.CharField(
        max_length=100, choices=BUY_OR_EXCHANGE, default='Exchange',
        help_text="By adding items to exchange you can make requests to other users for exchange.")
    condition = models.CharField(
        max_length=100, choices=CONITION_CHOICES, default='Acceptable')
    created_at = models.DateTimeField(auto_now_add=True)
    # Cover image is stored as an external URL; the old ImageField was
    # removed in migration 0076.
    image_url = models.CharField(max_length=500, blank=True, null=True)

    class Meta:
        verbose_name = 'Book'
        verbose_name_plural = 'Books'

    def __str__(self):
        return self.book_name


class Profile(models.Model):
    """One-to-one user profile holding the avatar image."""

    user = models.OneToOneField(User, on_delete=models.CASCADE)
    profile_pic = models.ImageField(
        default=random_img, upload_to="profile_images/")

    def __str__(self):
        return self.user.username

    def save(self, *args, **kwargs):
        # After the normal save, downscale the stored avatar so it never
        # exceeds 300x300 pixels.
        super(Profile, self).save(*args, **kwargs)
        img = Image.open(self.profile_pic.path)
        if img.height > 300 or img.width > 300:
            output_size = (300, 300)
            img.thumbnail(output_size)
            img.save(self.profile_pic.path)


class ShippingAddress(models.Model):
    """Delivery address and contact number attached to a Profile."""

    profile = models.OneToOneField(
        Profile, on_delete=models.CASCADE, related_name='address')
    phone_regex = RegexValidator(
        regex=r'^\+?1?\d{10,15}$',
        message="Phone number must be entered in the format: '+9999999999'. Up to 15 digits allowed.")
    phone_number = models.CharField(
        validators=[phone_regex], max_length=17,
        help_text="Enter your 10 digit phone number without any prefix code.")
    flatnumber = models.CharField("Flat Number", max_length=100)
    address1 = models.CharField("Address line 1", max_length=500,)
    address2 = models.CharField(
        "Address line 2", max_length=500, blank=True, null=True)
    zip_code = models.CharField(
        max_length=100, choices=ZIP_CHOICES, default='421202',
        help_text="We only operate in these locations for now!")
    city = models.CharField("City", max_length=100,)

    class Meta:
        verbose_name = "Shipping Address"
        verbose_name_plural = "Shipping Addresses"

    def status(self):
        """Return True while the address is still incomplete.

        Fixed: the original compared with ``is ""`` -- identity comparison
        against a string literal is implementation-dependent and raises a
        SyntaxWarning on CPython >= 3.8; equality is the correct test.
        """
        return self.address1 == "" or self.phone_number == ""

    def __str__(self):
        return "{},{},{},{},{},{}".format(
            self.profile.user.username, self.flatnumber, self.address1,
            self.address2, self.zip_code, self.phone_number)


class Order(models.Model):
    """A user's shopping cart (one per profile, created by signal)."""

    owner = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
    items = models.ManyToManyField(Book)
    date_ordered = models.DateTimeField(auto_now=True)

    def get_cart_items(self):
        return self.items.all()

    def __str__(self):
        return self.owner.user.username


class UserCollection(models.Model):
    """All books a user has listed (one collection per profile)."""

    owner = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
    books = models.ManyToManyField(Book)
    date_ordered = models.DateTimeField(auto_now=True)

    def get_collection_items(self):
        return self.books.all()

    def __str__(self):
        return self.owner.user.username


class Requests(models.Model):
    """A pending exchange request from `requester` for `offerrer`'s book."""

    requester = models.ForeignKey(
        User, related_name='to_user', on_delete=models.CASCADE)
    offerrer = models.ForeignKey(
        User, related_name='from_user', on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)
    requester_book = models.ForeignKey(
        Book, related_name='requester_book_from_user',
        on_delete=models.CASCADE)

    def __str__(self):
        return "Request from {}, to {} ,with Book {}".format(
            self.requester.username, self.offerrer.username,
            self.requester_book.book_name)

    class Meta:
        verbose_name_plural = "Requests"


class Transaction(models.Model):
    """An accepted exchange: the two books and both parties' addresses."""

    requester = models.ForeignKey(
        User, related_name='requester', on_delete=models.CASCADE)
    offerrer = models.ForeignKey(
        User, related_name='offerrer', on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)
    requester_book = models.ForeignKey(
        Book, related_name='requested_book_from_user',
        on_delete=models.CASCADE)
    offerrer_book = models.ForeignKey(
        Book, related_name='offerrer_book_from_user',
        on_delete=models.CASCADE)
    requester_address = models.ForeignKey(
        ShippingAddress, related_name='user_address', null=True,
        on_delete=models.CASCADE)
    offerrer_address = models.ForeignKey(
        ShippingAddress, related_name='seller_address', null=True,
        on_delete=models.CASCADE)

    def __str__(self):
        return "From {}, to {} and {} to {}".format(
            self.requester.username, self.offerrer.username,
            self.requester_book.book_name, self.offerrer_book.book_name)


class OldRequests(models.Model):
    """Archive of requests that have been resolved or withdrawn."""

    requester = models.ForeignKey(
        User, related_name='old_to_user', on_delete=models.CASCADE)
    offerrer = models.ForeignKey(
        User, related_name='old_from_user', on_delete=models.CASCADE)
    requester_book = models.ForeignKey(
        Book, related_name='old_requester_book_from_user',
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return "From {}, to {} ,with Book {}".format(
            self.requester.username, self.offerrer.username,
            self.requester_book.book_name)

    class Meta:
        verbose_name_plural = "Old Requests"


class FinalBuyOrder(models.Model):
    """A placed (not yet delivered) purchase of a Sell-listed book."""

    user = models.ForeignKey(User, related_name='user',
                             on_delete=models.CASCADE)
    book = models.ForeignKey(Book, on_delete=models.CASCADE)
    seller = models.ForeignKey(
        User, related_name='seller', on_delete=models.CASCADE)
    useraddress = models.ForeignKey(
        ShippingAddress, related_name='address', on_delete=models.CASCADE)
    selleraddress = models.ForeignKey(
        ShippingAddress, related_name='selleraddress',
        on_delete=models.CASCADE)
    date_ordered = models.DateTimeField(auto_now_add=True)
    total_price = models.IntegerField(null=True)

    def __str__(self):
        return self.book.book_name


class CompletedTransaction(models.Model):
    """A delivered exchange; book/author names are denormalised to plain
    CharFields so the record survives Book deletion."""

    requester = models.ForeignKey(
        User, related_name='completed_requester', on_delete=models.CASCADE)
    offerrer = models.ForeignKey(
        User, related_name='completed_offerrer', on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)
    requester_book_name = models.CharField(
        max_length=100, blank=True, null=True)
    offerrer_book_name = models.CharField(
        max_length=100, blank=True, null=True)
    requester_author_name = models.CharField(
        max_length=100, blank=True, null=True)
    offerrer_author_name = models.CharField(
        max_length=100, blank=True, null=True)
    requester_address = models.ForeignKey(
        ShippingAddress, related_name='completed_user_address', null=True,
        on_delete=models.CASCADE)
    offerrer_address = models.ForeignKey(
        ShippingAddress, related_name='completed_seller_address', null=True,
        on_delete=models.CASCADE)

    def __str__(self):
        return "From {}, to {} Book1 is {} Book2 is{}".format(
            self.requester.username, self.offerrer.username,
            self.requester_book_name, self.offerrer_book_name)


class CompletedBuyOrder(models.Model):
    """A delivered purchase; like CompletedTransaction, stores the book and
    author names as plain text rather than foreign keys."""

    user = models.ForeignKey(User, related_name='completed_user', blank=True,
                             null=True, on_delete=models.CASCADE)
    book_name = models.CharField(max_length=100, blank=True, null=True)
    author_name = models.CharField(max_length=100, blank=True, null=True)
    seller = models.ForeignKey(
        User, related_name='completed_seller', blank=True, null=True,
        on_delete=models.CASCADE)
    useraddress = models.ForeignKey(
        ShippingAddress, related_name='completed_address', blank=True,
        null=True, on_delete=models.CASCADE)
    selleraddress = models.ForeignKey(
        ShippingAddress, related_name='completed_selleraddress', blank=True,
        null=True, on_delete=models.CASCADE)
    date_ordered = models.DateTimeField(auto_now_add=True)
    total_price = models.IntegerField(null=True)

    def __str__(self):
        return self.book_name
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """On first save of a User, bootstrap the related records the rest of
    the app assumes exist: Profile, cart Order, ShippingAddress and
    UserCollection."""
    if created:
        Profile.objects.create(user=instance)
        Order.objects.create(owner=instance.profile)
        ShippingAddress.objects.create(profile=instance.profile)
        UserCollection.objects.create(owner=instance.profile)


@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Keep the Profile row in sync whenever the User is saved."""
    instance.profile.save()


class EmailThreadAPI(threading.Thread):
    """Send one email through the Mailgun HTTP API on a background thread
    so request handling is not blocked by the network call.

    NOTE: 'recepient' is a misspelling of 'recipient' kept for interface
    compatibility with existing keyword-argument callers.
    """

    def __init__(self, subject, text, recepient):
        self.subject = subject
        self.text = text
        self.recepient = recepient
        threading.Thread.__init__(self)

    def run(self):
        requests.post(MAILGUN_REQUEST_URL, auth=('api', MAILGUN_KEY), data={
            'from': 'cadabrabooks@gmail.com',
            'to': self.recepient,
            'subject': self.subject,
            'text': self.text,
        })


def send_sms(mobiles, message):
    """Fire-and-forget SMS via the MSG91 HTTP API (Indian numbers only:
    country code is hard-wired to 91).  The response is intentionally
    ignored; the unused local binding of the original was removed."""
    parms = {
        'authkey': MSG_SMS_AUTH_KEY,
        'country': '91',
        'sender': 'CDABRA',
        'route': '4',
        'mobiles': mobiles,
        'message': message,
    }
    requests.post(url=MSG_SMS_URL, params=parms)


@receiver(post_save, sender=Transaction)
def send_transaction_email(sender, instance, created, **kwargs):
    """Notify both parties (email + SMS) when an exchange order is created."""
    if created:
        EmailThreadAPI(
            subject='Exchange Order created for your book ' + instance.offerrer_book.book_name,
            text='An exchange order has been created for "' + instance.requester_book.book_name + '" from "' + instance.requester.username + '" and "' + instance.offerrer_book.book_name + '" from "' + instance.offerrer.username + '". Go to https://www.cadabra.co.in/transaction/orders/ to view the order. Thank You for ordering. Delivery of your book order will be attempted by CADABRA within one week.',
            recepient=instance.offerrer.email).start()
        EmailThreadAPI(
            subject='Exchange Order created for your book ' + instance.requester_book.book_name,
            text='An exchange order has been created for "' + instance.requester_book.book_name + '" from "' + instance.requester.username + '" and "' + instance.offerrer_book.book_name + '" from "' + instance.offerrer.username + '". Go to https://www.cadabra.co.in/transaction/orders/ to view the order. Thank You for ordering. Delivery of your book order will be attempted by CADABRA within one week.',
            recepient=instance.requester.email).start()
        send_sms(
            mobiles=instance.offerrer_address.phone_number,
            message='An exchange order has been created for "' + instance.requester_book.book_name + '" from user ' + instance.requester.username + ' and "' + instance.offerrer_book.book_name + '" from user ' + instance.offerrer.username + '. Go to https://www.cadabra.co.in/transaction/orders/ to view the order. Thank You for ordering. Delivery of your book order will be attempted by CADABRA within one week.')
        send_sms(
            mobiles=instance.requester_address.phone_number,
            message='An exchange order has been created for "' + instance.requester_book.book_name + '" from user ' + instance.requester.username + ' and "' + instance.offerrer_book.book_name + '" from user ' + instance.offerrer.username + '. Go to https://www.cadabra.co.in/transaction/orders/ to view the order. Thank You for ordering. Delivery of your book order will be attempted by CADABRA within one week.')


@receiver(post_save, sender=Requests)
def send_request_email(sender, instance, created, **kwargs):
    """Email the book owner when a new exchange request arrives."""
    if created:
        EmailThreadAPI(
            subject='New Request for book "' + instance.requester_book.book_name + '"',
            text='You have recieved a new request from user "' + instance.requester.username + '" for book "' + instance.requester_book.book_name + '". Go to https://cadabra.co.in/transaction/offers/ for more details.',
            recepient=instance.offerrer.email).start()


@receiver(post_save, sender=FinalBuyOrder)
def send_buyorder_email(sender, instance, created, **kwargs):
    """Notify buyer (email) and seller (email + SMS) of a new buy order."""
    if created:
        EmailThreadAPI(
            subject='Buy Order successfully placed',
            text='You have successfully ordered book "' + instance.book.book_name + '" . Amount payable is Rs ' + str(instance.total_price) + '. Go to https://www.cadabra.co.in/transaction/orders/ to view the order. CADABRA will deliver the book within one week.',
            recepient=instance.user.email).start()
        EmailThreadAPI(
            subject='Order for your book "' + instance.book.book_name + '"',
            text='Your book "' + instance.book.book_name + '" has been sold to user' + instance.user.username + '. You can view the item at http://cadabra.co.in/userbooks/sold/. CADABRA will pick up the book within one week.',
            recepient=instance.seller.email).start()
        send_sms(
            mobiles=instance.selleraddress.phone_number,
            message='Your book "' + instance.book.book_name + '" has been sold. CADABRA will pick up the book within one week.')


@receiver(post_save, sender=CompletedTransaction)
def send_completed_transaction_email(sender, instance, created, **kwargs):
    """Email both parties once an exchange delivery is completed."""
    if created:
        EmailThreadAPI(
            subject='Delivery Completed ',
            text='Delivery for "' + instance.requester_book_name + '" from "' + instance.requester.username + '" and "' + instance.offerrer_book_name + '" from "' + instance.offerrer.username + '" has been completed. Thank You for using CADABRA.',
            recepient=instance.offerrer.email).start()
        EmailThreadAPI(
            subject='Delivery Completed ',
            text='Delivery for "' + instance.requester_book_name + '" from "' + instance.requester.username + '" and "' + instance.offerrer_book_name + '" from "' + instance.offerrer.username + '" has been completed. Thank You for using CADABRA.',
            recepient=instance.requester.email).start()


@receiver(post_save, sender=CompletedBuyOrder)
def send_completed_buy_order_email(sender, instance, created, **kwargs):
    """Email the buyer once a purchase delivery is completed."""
    if created:
        EmailThreadAPI(
            subject='Delivery Completed ',
            text='Delivery for "' + instance.book_name + '" has been completed. Thank you for using CADABRA.',
            recepient=instance.user.email).start()


@receiver(pre_delete, sender=Requests)
def cancel_requests_email(sender, instance, **kwargs):
    """Email the book owner when a pending request is withdrawn."""
    EmailThreadAPI(
        subject='Request cancelled for "' + instance.requester_book.book_name + '"',
        text='Request for "' + instance.requester_book.book_name + '" from "' + instance.requester.username + '" is cancelled. Go to https://www.cadabra.co.in/ for more details.',
        recepient=instance.offerrer.email).start()


@receiver(pre_delete, sender=Transaction)
def delete_transaction_email(sender, instance, **kwargs):
    """Notify both parties (email) and the offerrer (SMS) when an exchange
    order is cancelled."""
    EmailThreadAPI(
        subject='Exchange Order cancelled ',
        text='An exchange order has been cancelled for book "' + instance.requester_book.book_name + '" from "' + instance.requester.username + '" and book "' + instance.offerrer_book.book_name + '" from "' + instance.offerrer.username + '".Go to https://www.cadabra.co.in/ for more books. Thank You for using CADABRA.',
        recepient=instance.offerrer.email).start()
    EmailThreadAPI(
        subject='Exchange Order cancelled ',
        text='An exchange order has been cancelled for book "' + instance.requester_book.book_name + '" from "' + instance.requester.username + '" and book "' + instance.offerrer_book.book_name + '" from "' + instance.offerrer.username + '".Go to https://www.cadabra.co.in/ for more books. Thank You for using CADABRA.',
        recepient=instance.requester.email).start()
    send_sms(
        mobiles=instance.offerrer_address.phone_number,
        message='An exchange order has been cancelled for book "' + instance.requester_book.book_name + '" from ' + instance.requester.username + ' and book ' + instance.offerrer_book.book_name + ' from "' + instance.offerrer.username + '".')


@receiver(pre_delete, sender=FinalBuyOrder)
def delete_buyorder_email(sender, instance, **kwargs):
    """Notify the seller (email + SMS) when a buy order is cancelled."""
    EmailThreadAPI(
        subject='Buy Order cancelled',
        text='Buy Order for book "' + instance.book.book_name + '" is cancelled from user.',
        recepient=instance.seller.email).start()
    send_sms(
        mobiles=instance.selleraddress.phone_number,
        message='Buy Order for book "' + instance.book.book_name + '" is cancelled from user ' + instance.user.username + '.')
/coreapp/urls.py
# URL configuration for the coreapp application (namespace "coreapp").
from django.conf.urls import url
from coreapp import views
from django.urls import path

app_name = 'coreapp'

urlpatterns = [
    # Public book listings (all / buy-only / exchange-only).
    path('', views.BookListView.as_view(), name='list_entries'),
    path('buy/', views.BuyListView.as_view(), name='buy_entries'),
    path('exchange/', views.ExchangeListView.as_view(), name='exchange_entries'),
    # Create a listing and view a single book.
    path('newentry/', views.new_entry, name='new_entry'),
    path('bookdetail/<int:pk>/view', views.BookDetailView.as_view(), name='book_detail'),
    # Profile pages and editors.
    path('profile/', views.profile, name='profile'),
    path('profile/edit', views.update_profile, name='profile_edit'),
    path('profile/address/edit', views.update_address, name='address_edit'),
    path('signup/', views.SignUp.as_view(), name='signup'),
    # The current user's own books, including sold history and CRUD.
    path('userbooks/', views.UserBookListView.as_view(), name='userbooks'),
    path('userbooks/sold/', views.UserBookSoldItemsView.as_view(), name='sold-books'),
    path('userbooks/<str:username>', views.UserBookListViewForUser.as_view(), name='userbooksforuser'),
    path('userbooks/<int:pk>/update', views.PostUpdateView.as_view(), name='post-update'),
    path('userbooks/<int:pk>/delete', views.PostDeleteView.as_view(), name='post-delete'),
    path('aboutus/', views.aboutus, name='aboutus')
]
/coreapp/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import CreateView, ListView, DetailView, UpdateView, DeleteView
from django.views.generic.edit import FormView
from django.urls import reverse_lazy, reverse
from .forms import UserCreationForm, NewEntryForm, UserForm, ProfileForm, ShippingAddressForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from coreapp.models import Book, Profile, UserCollection, ShippingAddress, FinalBuyOrder, Transaction, CompletedBuyOrder, Requests
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Q
from django.contrib.messages.views import SuccessMessageMixin
from django.core.cache import cache
from django.core.paginator import Paginator
from django.db.models import Case, When
import random
import requests
import json
from nofapapp.settings import GOOGLE_BOOKS_URL

# Module-level QuerySets of book ids that are "unavailable": already bought,
# or locked on either side of an exchange.  Django QuerySets are lazy, so
# these are re-evaluated each time they appear in an __in lookup rather than
# frozen at import time.
ordered_books = FinalBuyOrder.objects.values_list('book')
requester_books = Transaction.objects.values_list('requester_book')
offerrer_books = Transaction.objects.values_list('offerrer_book')


@login_required
def profile(request):
    # Render the logged-in user's profile with their shipping address.
    # NOTE(review): .get() assumes the address row exists -- it is created
    # by a post_save signal on User, but accounts predating that signal
    # would 500 here; confirm before relying on it.
    address = ShippingAddress.objects.get(profile=request.user.profile)
    return render(request, 'profile.html', {'profile': request.user.profile, 'address': address})


class SignUp(SuccessMessageMixin, CreateView):
    """Registration view; on success redirects to login with a flash message."""
    form_class = UserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
    success_message = 'Account Created! You can now Login!'
class BookListView(ListView):
    """Home listing: available books in a per-session random order.

    Excludes the viewer's own books and any book that is already bought or
    locked in an exchange.
    """
    model = Book
    template_name = 'list_entries.html'
    context_object_name = 'books'
    paginate_by = 12

    def get_queryset(self):
        # Each session is assigned one of three cached random orderings.
        # Fixed: the original used `if not session.get('all_books')`, which
        # treats the stored bucket 0 (a valid randrange(0, 3) result) as
        # missing and re-rolled the bucket on every request.
        if 'all_books' not in self.request.session:
            self.request.session['all_books'] = random.randrange(0, 3)
        cache_key = 'all_books_%d' % self.request.session['all_books']
        id_list = cache.get(cache_key)
        if not id_list:
            id_list = [row['id'] for row in
                       Book.objects.values('id').all().order_by('?')]
            cache.set(cache_key, id_list, 200)
        # Replay the cached random order deterministically.
        preserved = Case(*[When(pk=pk, then=pos)
                           for pos, pk in enumerate(id_list)])
        qs = (Book.objects
              .exclude(id__in=ordered_books)
              .exclude(id__in=requester_books)
              .exclude(id__in=offerrer_books)
              .filter(id__in=id_list)
              .order_by(preserved))
        if self.request.user.is_authenticated:
            # Never show users their own listings.
            qs = qs.exclude(user=self.request.user)
        return qs


class BuyListView(ListView):
    """Listing of Sell-only books, same session-random ordering scheme."""
    model = Book
    template_name = 'list_entries.html'
    context_object_name = 'books'
    paginate_by = 12

    def get_queryset(self):
        # Same session-bucket fix as BookListView: membership test instead
        # of a truthiness test so bucket 0 is honoured.
        if 'buy_books' not in self.request.session:
            self.request.session['buy_books'] = random.randrange(0, 3)
        cache_key = 'buy_books_%d' % self.request.session['buy_books']
        id_list = cache.get(cache_key)
        if not id_list:
            id_list = [row['id'] for row in
                       Book.objects.values('id').all().order_by('?')]
            cache.set(cache_key, id_list, 200)
        preserved = Case(*[When(pk=pk, then=pos)
                           for pos, pk in enumerate(id_list)])
        qs = (Book.objects
              .exclude(id__in=ordered_books)
              .filter(sell_or_exchange='Sell')
              .filter(id__in=id_list)
              .order_by(preserved))
        if self.request.user.is_authenticated:
            qs = qs.exclude(user=self.request.user)
        return qs


class ExchangeListView(ListView):
    """Listing of Exchange-only books, same session-random ordering scheme."""
    model = Book
    template_name = 'list_entries.html'
    context_object_name = 'books'
    paginate_by = 12

    def get_queryset(self):
        # Same session-bucket fix as the sibling list views.
        if 'exchange_books' not in self.request.session:
            self.request.session['exchange_books'] = random.randrange(0, 3)
        cache_key = 'exchange_books_%d' % self.request.session['exchange_books']
        id_list = cache.get(cache_key)
        if not id_list:
            id_list = [row['id'] for row in
                       Book.objects.values('id').all().order_by('?')]
            cache.set(cache_key, id_list, 200)
        preserved = Case(*[When(pk=pk, then=pos)
                           for pos, pk in enumerate(id_list)])
        # Consistency fix: the original authenticated branch omitted the
        # filter(id__in=id_list) that every sibling branch applies.
        qs = (Book.objects
              .exclude(id__in=requester_books)
              .exclude(id__in=offerrer_books)
              .filter(sell_or_exchange='Exchange')
              .filter(id__in=id_list)
              .order_by(preserved))
        if self.request.user.is_authenticated:
            qs = qs.exclude(user=self.request.user)
        return qs


class UserBookListView(LoginRequiredMixin, ListView):
    """The logged-in user's own, still-available listings."""
    model = Book
    template_name = 'user_books_list_entries.html'
    context_object_name = 'books'
    ordering = ['-created_at']

    def get_queryset(self):
        collection_items = UserCollection.objects.get(
            owner=self.request.user.profile)
        # Hide books that are sold or committed to an exchange.
        return (collection_items.books
                .exclude(id__in=ordered_books)
                .exclude(id__in=requester_books)
                .exclude(id__in=offerrer_books))


class UserBookSoldItemsView(LoginRequiredMixin, ListView):
    """History of the logged-in user's completed sales."""
    model = Book
    template_name = 'user_books_sold_list_entries.html'
    context_object_name = 'books'
    ordering = ['-created_at']

    def get_queryset(self):
        return CompletedBuyOrder.objects.filter(
            seller=self.request.user).order_by('date_ordered')


class UserBookListViewForUser(ListView):
    """Public view of another user's still-available listings."""
    model = Book
    template_name = 'collection_user_entries.html'
    context_object_name = 'books'
    ordering = ['-created_at']

    def get_queryset(self):
        username = self.kwargs['username']
        user_profile = get_object_or_404(Profile, user__username=username)
        collection_items = UserCollection.objects.get(owner=user_profile)
        return (collection_items.books
                .exclude(id__in=ordered_books)
                .exclude(id__in=requester_books)
                .exclude(id__in=offerrer_books))


class BookDetailView(DetailView):
    """Detail page for a single book."""
    model = Book
    template_name = 'book_detail_view.html'


@login_required
def new_entry(request):
    """Create a book listing.

    Two POST actions share the form: 'check' looks the title up on Google
    Books and re-renders with suggestions; 'submitentry' validates and
    saves the listing into the user's collection.
    """
    if request.method == 'POST' and 'check' in request.POST:
        new_entry_form = NewEntryForm(request.POST, instance=request.user)
        book_name = new_entry_form.data['book_name']
        if book_name:
            parms = {"q": book_name, "printType": "books",
                     "projection": "lite"}
            r = requests.get(url=GOOGLE_BOOKS_URL, params=parms)
            items = json.loads(r.text)
            # Fixed: Google Books omits the 'items' key entirely when there
            # are no results; the original items['items'] raised KeyError.
            return render(request, 'new_entry.html',
                          {'form': new_entry_form,
                           'items': items.get('items', [])})
        else:
            return render(request, 'new_entry.html',
                          {'form': new_entry_form})
    if request.method == 'POST' and 'submitentry' in request.POST:
        new_entry_form = NewEntryForm(request.POST, instance=request.user)
        if new_entry_form.is_valid():
            address = get_object_or_404(
                ShippingAddress, profile=request.user.profile)
            # A complete shipping address is required before listing.
            if address.address1 == '':
                messages.info(
                    request,
                    'You need to update address in profile to add a book!')
                return redirect('coreapp:new_entry')
            # Sell listings must carry a price.
            if (new_entry_form.cleaned_data['price'] is None
                    and new_entry_form.cleaned_data['sell_or_exchange'] == 'Sell'):
                messages.info(request, "Price cannot be blank in Sell order")
                return redirect('coreapp:new_entry')
            book = Book()
            book.user = request.user
            book.book_name = new_entry_form.cleaned_data['book_name']
            book.author_name = new_entry_form.cleaned_data['author_name']
            book.price = new_entry_form.cleaned_data['price']
            book.description = new_entry_form.cleaned_data['description']
            book.sell_or_exchange = new_entry_form.cleaned_data['sell_or_exchange']
            book.image_url = new_entry_form.cleaned_data['image_url']
            book.save()
            collection, status = UserCollection.objects.get_or_create(
                owner=request.user.profile)
            collection.books.add(book)
            collection.save()
            return redirect('coreapp:userbooks')
        else:
            return render(request, 'new_entry.html',
                          {'form': new_entry_form})
    new_entry_form = NewEntryForm(instance=request.user)
    return render(request, 'new_entry.html', {'form': new_entry_form})


class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a listing; only its owner passes test_func."""
    model = Book
    form_class = NewEntryForm
    template_name = 'new_entry_update.html'
    success_url = reverse_lazy('coreapp:userbooks')

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super().form_valid(form)

    def test_func(self):
        return self.request.user == self.get_object().user


class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a listing; only its owner passes test_func."""
    model = Book
    success_url = reverse_lazy('coreapp:userbooks')
    template_name = 'book_confirm_delete.html'

    def test_func(self):
        return self.request.user == self.get_object().user

    def delete(self, *args, **kwargs):
        # Fixed: the original forwarded `*kwargs`, which unpacks only the
        # keyword NAMES as positional arguments; `**kwargs` forwards the
        # keyword arguments correctly.
        response = super(PostDeleteView, self).delete(*args, **kwargs)
        # If the user no longer has any free Exchange book, their pending
        # outgoing requests can no longer be fulfilled -- drop them.
        if not (UserCollection.objects.get(owner=self.request.user.profile)
                .books.filter(sell_or_exchange='Exchange')
                .exclude(id__in=offerrer_books)
                .exclude(id__in=requester_books)
                .exists()):
            Requests.objects.filter(requester=self.request.user).delete()
        return response


@login_required
def update_profile(request):
    """Edit basic user fields (UserForm) for the logged-in user."""
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        if user_form.is_valid():
            user_form.save()
            messages.success(request,
                             ('Your profile was successfully updated!'))
            return redirect('coreapp:profile')
    else:
        user_form = UserForm(instance=request.user)
    return render(request, 'profile_edit.html', {'user_form': user_form})


@login_required
def update_address(request):
    """Edit the logged-in user's shipping address."""
    if request.method == 'POST':
        address_form = ShippingAddressForm(
            request.POST, instance=request.user.profile.address)
        if address_form.is_valid():
            address_form.save()
            messages.success(request, ('Address successfully updated!'))
            return redirect('coreapp:profile')
    else:
        address_form = ShippingAddressForm(
            instance=request.user.profile.address)
    return render(request, 'address_edit.html',
                  {'address_form': address_form})


def aboutus(request):
    """Static about-us page."""
    return render(request, 'aboutus.html')
/nofapapp/context_processors.py
from coreapp.models import Transaction, ShippingAddress, Requests, Profile, Order, FinalBuyOrder, UserCollection
from django.shortcuts import get_object_or_404
from coreapp.forms import ShippingAddressForm
from django.db.models import Q


def add_variable_to_context(request):
    """Template context processor: navbar badge counts for the current user.

    For anonymous requests returns an empty dict.  For authenticated users
    exposes: pending request/offer counts, combined order count (exchanges
    the user is on either side of, plus buy orders), cart item count, a
    truthy address-completeness check (address1), and whether the user
    still has at least one Exchange book free to offer.
    """
    if request.user.id:
        # Books locked on either side of an in-flight exchange.
        requester_books = Transaction.objects.values_list('requester_book')
        offerrer_books = Transaction.objects.values_list('offerrer_book')
        # addressformcontext=ShippingAddressForm(
        #     request.POST, instance=request.user.profile.address)
        orderitems = Order.objects.get(owner=request.user.profile)
        return {
            'requestscount': Requests.objects.filter(requester=request.user).count(),
            'offercount': Requests.objects.filter(offerrer=request.user).count(),
            # Exchanges the user participates in (either role) + buy orders.
            'orderscount': Transaction.objects.filter(Q(offerrer=request.user) | Q(requester=request.user)).count() + FinalBuyOrder.objects.filter(user=request.user).count(),
            'cartitemscount': orderitems.items.count(),
            # Empty string when the address is incomplete -- templates use
            # this as a boolean gate.
            'addresscheck': get_object_or_404(ShippingAddress, profile=get_object_or_404(Profile, user=request.user)).address1,
            # 'addressformcontext': addressformcontext
            'exchangeitemexists': UserCollection.objects.get(
                owner=request.user.profile).books.exclude(
                id__in=requester_books).exclude(id__in=offerrer_books).filter(sell_or_exchange="Exchange").exists(),
        }
    else:
        return {}
/nofapapp/urls.py
from django.conf.urls import url,include from django.contrib import admin from django.contrib.auth.views import LoginView, LogoutView, PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, PasswordResetCompleteView from django.conf import settings from django.conf.urls.static import static urlpatterns = [ url('', include('coreapp.urls')), url('transaction/', include('transaction.urls', namespace='transaction')), url('search/',include('search.urls',namespace='search')), url('cartitems/',include('cart.urls', namespace='cart')), url(r'^login/$', LoginView.as_view(), name='login'), url(r'^logout/$', LogoutView.as_view(), name='logout'), url('^', include('django.contrib.auth.urls')), url(r'^cadabraadmin/', admin.site.urls), url(r'^password_reset/$', PasswordResetView.as_view() , name='password_reset'), url(r'^password_reset/done/$', PasswordResetDoneView.as_view(), name='password_reset_done'), url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', PasswordResetConfirmView.as_view(), name='password_reset_confirm'), url(r'^reset/done/$', PasswordResetCompleteView.as_view(), name='password_reset_complete'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
/search/forms.py
from django import forms from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.models import User from coreapp.models import Book, Profile from dal import autocomplete class SearchForm(forms.Form): search = forms.CharField( max_length=100, widget=autocomplete.ModelSelect2(url='country-autocomplete')) # class Meta: # widgets = {'search': autocomplete.ListSelect2( # url='content-autocomplete')} # def __init__(self, *args, **kwargs): # super(FeedbackInfoInputModelForm, self).__init__(*args, **kwargs) # self.fields['search'].widget = forms.TextInput(attrs={ # 'id': 'search', # })
/search/urls.py
from django.urls import path from search import views app_name = 'searchapp' urlpatterns = [ path('', views.SearchListView.as_view(), name='search'), # path('', views.search_form , name='search'), path('content-autocomplete/', views.ContentAutoComplete.as_view(),name='content-autocomplete'), # path('search1/', views.SearchAutoComplete.as_view(), name='search1'), ]
/search/views.py
from django.shortcuts import render from dal import autocomplete from django.views import generic from django.views.generic.edit import FormView from django.urls import reverse_lazy, reverse from search.forms import SearchForm from django.contrib import messages from django.contrib.auth.decorators import login_required from coreapp.models import Book, Profile, Transaction, FinalBuyOrder from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin from django.contrib.messages.views import SuccessMessageMixin from django.db.models import Q class SearchListView(LoginRequiredMixin, generic.ListView): model = Book template_name = 'list_entries.html' context_object_name = 'books' ordering = ['-created_at'] def get_queryset(self): book_name = self.request.GET.get('search') ordered_books = FinalBuyOrder.objects.values_list('book') requester_books = Transaction.objects.values_list('requester_book') offerrer_books = Transaction.objects.values_list('offerrer_book') return Book.objects.exclude(user=self.request.user).exclude(id__in=ordered_books).exclude(id__in=requester_books).exclude(id__in=offerrer_books).filter(Q(book_name__icontains=book_name) | Q(author_name__icontains=book_name)) # return Book.objects.filter(Q(book_name__icontains=book_name)) | Book.objects.filter(Q(author_name__icontains=book_name)) # def search_form(request): # if request.method == 'POST': # form = SearchForm(request.POST) # if form.is_valid(): # form.save() # request.session['home_request'] = True # return redirect('coreapp:list_entries') # else: # form = SearchForm() # return render(request, 'list_entries.html', {'form': form}) # class SearchAutoComplete(FormView): # template_name = 'list_entries.html' # form_class = SearchForm # success_url = reverse_lazy('coreapp:list_entries') class ContentAutoComplete(autocomplete.Select2QuerySetView): def get_queryset(self): qs = Book.objects.all() if self.q: qs = qs.filter(book_name__istartswith=self.q) return qs
/transaction/transaction/templatetags/user_books_tag.py
from django import template from coreapp.models import Profile, Transaction, FinalBuyOrder from django.shortcuts import get_object_or_404 register = template.Library() @register.filter def user_books_for_user(collection_all, user_username): ordered_books = FinalBuyOrder.objects.values_list('book') requester_books = Transaction.objects.values_list('requester_book') offerrer_books = Transaction.objects.values_list('offerrer_book') user_profile = get_object_or_404(Profile, user__username=user_username) collection_items = collection_all.get( owner=user_profile) return collection_items.books.filter( sell_or_exchange='Exchange').exclude(id__in=ordered_books).exclude(id__in=offerrer_books).exclude(id__in=requester_books) @register.filter def author_list(authors): if authors: author_name = ",".join(authors) return author_name + '.' else: return authors
/transaction/urls.py
from django.conf.urls import url from transaction import views from django.urls import path app_name = 'transaction' urlpatterns = [ path('<int:book_id>/add', views.add_request, name='add_request'), path('requests/', views.RequestListView.as_view(), name='requests_view'), path('requests/<int:pk>/delete',views.RequestDeleteView.as_view(), name='request-delete'), path('offers/', views.OfferListView.as_view(), name='offers_view'), path('offers/<int:pk>/delete', views.OfferDeleteView.as_view(), name='offer-delete'), path('offers/<int:offer_id>/<int:book_id>/finaltransaction', views.final_transaction ,name='final-transaction'), path('orders/', views.TransactionListView.as_view(), name='orders_view'), path('orders/<int:pk>/delete', views.TransactionDeleteView.as_view(),name='order-delete'), path('orders/exchange-orders', views.TransactionCompletedExchangeOrder.as_view(), name='exchange-order'), path('orders/buy-orders', views.TransactionCompletedBuyOrder.as_view(), name='buy-order'), ]
/transaction/views.py
from django.shortcuts import render from django.views.generic import ListView from django.contrib.auth.decorators import login_required from django.shortcuts import render, redirect, get_object_or_404 from coreapp.models import Book, Requests, Transaction, UserCollection, FinalBuyOrder, OldRequests, Profile, ShippingAddress, CompletedTransaction, CompletedBuyOrder, cancel_requests_email from coreapp.models import Requests from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin from django.views import generic from django.contrib import messages from django.urls import reverse_lazy, reverse from coreapp.forms import ShippingAddressForm from django.db.models import Q from django.utils import timezone import datetime from django.db import transaction from django.db.models import signals ordered_books = FinalBuyOrder.objects.values_list('book') requester_books = Transaction.objects.values_list('requester_book') offerrer_books = Transaction.objects.values_list('offerrer_book') @transaction.atomic @login_required def final_transaction(request, offer_id, book_id): offerrer_book = get_object_or_404(Book, id=book_id) new_request = get_object_or_404(Requests,id=offer_id) address_form = ShippingAddressForm( request.POST, instance=request.user.profile.address) if request.method == "POST" and 'Yes' in request.POST: offerrer_address = get_object_or_404(ShippingAddress, profile=request.user.profile) # requester_profile = get_object_or_404( # Profile, user=new_request.requester) requester_address = get_object_or_404( ShippingAddress, profile=new_request.requester.profile) new_order = Transaction(requester=new_request.requester, offerrer=new_request.offerrer, requester_book=new_request.requester_book, offerrer_book=offerrer_book, requester_address=requester_address, offerrer_address=offerrer_address) old_request = OldRequests(requester=new_request.requester, offerrer=new_request.offerrer, requester_book=new_request.requester_book) # delete entry from requests 
signals.pre_delete.disconnect(cancel_requests_email, sender=Requests) new_request.delete() signals.pre_delete.connect(cancel_requests_email, sender=Requests) # save old request # save new order new_order.save() old_request.save() if not UserCollection.objects.get( owner=new_request.requester.profile).books.filter( sell_or_exchange='Exchange').exclude(id__in=offerrer_books).exclude(id__in=requester_books): Requests.objects.filter(requester=new_request.requester).delete() return redirect('transaction:orders_view') if request.method == 'POST' and 'updateadd' in request.POST: if address_form.is_valid(): address_form.save() messages.success(request, ('Address successfully updated!')) offer = get_object_or_404(Requests,id=offer_id) address = get_object_or_404(ShippingAddress, profile=request.user.profile) context = {'offer': offer, 'book': offerrer_book, 'address': address, 'address_form': address_form} return render(request,'transaction_final.html',context) @transaction.atomic @login_required def add_request(request,book_id): book = get_object_or_404(Book, id=book_id) new_request = Requests(requester=request.user, offerrer=book.user , requester_book=book) address = ShippingAddress.objects.get(profile=request.user.profile) if address.status(): messages.info(request, "You need to add address in profile to make request !") else: if (UserCollection.objects.get( owner=request.user.profile).books.exclude( id__in=requester_books).exclude(id__in=offerrer_books).filter(sell_or_exchange="Exchange").exists()): if Requests.objects.filter(requester=request.user, offerrer=book.user, requester_book=book).exists(): messages.info(request,"Request for this book already made!") else: date_from = datetime.datetime.now( tz=timezone.utc) - datetime.timedelta(days=1) no_of_requests_made_in_one_day = Requests.objects.filter(requester=request.user, timestamp__gte=date_from).count() if no_of_requests_made_in_one_day>5: messages.warning(request,"Maximum requests exceeded for one day : you can 
make maximum of 6 requests") else: messages.info( request, "New Request !") new_request.save() return redirect('transaction:requests_view') else: messages.info(request, ('You need to add "Exchange" items to your collection to make a request!')) return redirect('coreapp:list_entries') class RequestListView(LoginRequiredMixin, generic.ListView): model = Requests template_name = 'transaction_request.html' context_object_name = 'requests' def get_queryset(self): return Requests.objects.filter(requester=self.request.user).order_by('timestamp') class RequestDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView): model = Requests success_url = reverse_lazy('transaction:requests_view') template_name = 'transaction_request_confirm_delete.html' def test_func(self): request_object = self.get_object() if self.request.user == request_object.requester: return True return False class OfferListView(LoginRequiredMixin, generic.ListView): model = Requests template_name = 'transaction_offer.html' context_object_name = 'offers' def get_queryset(self): return Requests.objects.filter(offerrer=self.request.user).order_by('timestamp') def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['collection_all'] = UserCollection.objects.all() return context class OfferDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView): model = Requests success_url = reverse_lazy('transaction:offers_view') template_name = 'transaction_offer_confirm_delete.html' def test_func(self): offer = self.get_object() if self.request.user == offer.offerrer: return True return False def delete(self, request, *args, **kwargs): signals.pre_delete.disconnect(cancel_requests_email, sender=Requests) self.object = self.get_object() self.object.delete() signals.pre_delete.connect(cancel_requests_email, sender=Requests) return redirect(self.get_success_url()) class TransactionListView(LoginRequiredMixin, generic.ListView): model = Transaction template_name = 
'transaction_order.html' context_object_name = 'orders' def get_queryset(self): return Transaction.objects.filter(Q(offerrer=self.request.user) | Q(requester=self.request.user)).order_by('timestamp') def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) context['buy_order'] = FinalBuyOrder.objects.filter( user=self.request.user).order_by('-date_ordered') return context class TransactionDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView): model = Transaction success_url = reverse_lazy('transaction:orders_view') template_name = 'transaction_order_confirm_delete.html' def test_func(self): transaction = self.get_object() if self.request.user == transaction.requester or self.request.user == transaction.offerrer: return True return False class TransactionCompletedExchangeOrder(LoginRequiredMixin, generic.ListView): model = CompletedTransaction template_name = 'transaction_completed_exchange_order.html' context_object_name = 'orders' def get_queryset(self): return CompletedTransaction.objects.filter(Q(offerrer=self.request.user) | Q(requester=self.request.user)).order_by('timestamp') class TransactionCompletedBuyOrder(LoginRequiredMixin, generic.ListView): model = CompletedBuyOrder template_name = 'transaction_completed_buy_order.html' context_object_name = 'buy_order' def get_queryset(self): return CompletedBuyOrder.objects.filter(user=self.request.user).order_by('date_ordered')
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Therapoid/django-assent
refs/heads/master
{"/assent/views/assent.py": ["/assent/views/forms.py", "/assent/models.py"], "/assent/admin.py": ["/assent/models.py"], "/tests/conftest.py": ["/assent/models.py"], "/assent/views/forms.py": ["/assent/models.py"], "/assent/urls.py": ["/assent/views/assent.py"]}
└── ├── assent │ ├── admin.py │ ├── migrations │ │ └── 0001_initial.py │ ├── models.py │ ├── urls.py │ └── views │ ├── __init__.py │ ├── assent.py │ └── forms.py ├── tasks.py └── tests ├── conftest.py └── test_models.py
/assent/admin.py
# -*- coding: utf-8 -*- from django import forms from django.contrib import admin from django.utils.translation import ugettext as _ from transcode.conf import get_content_formatters from .models import Agreement, AgreementUser, AgreementVersion ContentFormat = get_content_formatters('ASSENT_FORMATTERS') # ===== INLINES =============================================================== class AgreementVersionForm(forms.ModelForm): content_format = forms.ChoiceField( label=_('Content Format'), required=True, choices=ContentFormat.CHOICES) class Meta: model = AgreementVersion fields = ( 'short_title', 'full_title', 'content', 'content_format', ) def __init__(self, *args, **kwargs): super(AgreementVersionForm, self).__init__(*args, **kwargs) instance = self.instance if not instance or not instance.content_format: self.fields['content_format'].initial = ContentFormat.DEFAULT class AgreementVersionInlineAdmin(admin.StackedInline): model = AgreementVersion extra = 0 readonly_fields = ('release_date', ) form = AgreementVersionForm fieldsets = ( (None, { 'fields': ( 'short_title', 'full_title', 'content', 'content_format', 'release_date', ) }), ) # ===== ADMINS ================================================================ class AgreementAdmin(admin.ModelAdmin): inlines = (AgreementVersionInlineAdmin, ) prepopulated_fields = {"slug": ("document_key",)} fieldsets = ( (None, { 'fields': ( 'document_key', 'slug', 'description', 'short_description', 'latest_version', ) }), ) admin.site.register(Agreement, AgreementAdmin) class AgreementUserAdmin(admin.ModelAdmin): list_display = ('user', 'agreement_version', 'acceptance_date', ) readonly_fields = ('acceptance_date', 'ip_address', ) fieldsets = ( (None, { 'fields': ( 'user', 'agreement_version', 'acceptance_date', 'ip_address', ) }), ) admin.site.register(AgreementUser, AgreementUserAdmin)
/assent/migrations/0001_initial.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-02-15 03:43 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Agreement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('document_key', models.CharField(default='', max_length=255, unique=True, verbose_name='document key')), ('slug', models.SlugField(default='', max_length=255, verbose_name='slug')), ('description', models.CharField(default='', max_length=4096, verbose_name='description')), ('short_description', models.CharField(default='', max_length=255, verbose_name='short description')), ('date_modified', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False, null=True)), ], options={ 'verbose_name': 'agreement', 'verbose_name_plural': 'agreements', }, ), migrations.CreateModel( name='AgreementUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('acceptance_date', models.DateTimeField(blank=True, null=True, verbose_name='acceptance date')), ('ip_address', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP address')), ], options={ 'verbose_name': 'agreement user', 'verbose_name_plural': 'agreement users', }, ), migrations.CreateModel( name='AgreementVersion', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('short_title', models.CharField(default='', max_length=255, verbose_name='short title')), ('full_title', models.CharField(default='', max_length=1023, verbose_name='full title')), ('content', models.TextField(blank=True, default='', verbose_name='content')), 
('content_format', models.CharField(default='', max_length=4, verbose_name='Content format')), ('release_date', models.DateTimeField(auto_now_add=True, verbose_name='release date')), ('agreement', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='assent.Agreement', verbose_name='agreement')), ('users', models.ManyToManyField(related_name='agreement_versions', through='assent.AgreementUser', to=settings.AUTH_USER_MODEL, verbose_name='users')), ], options={ 'verbose_name': 'agreement version', 'get_latest_by': ('release_date',), 'verbose_name_plural': 'agreement versions', 'ordering': ('-release_date',), }, ), migrations.AddField( model_name='agreementuser', name='agreement_version', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='agreement_users', to='assent.AgreementVersion', verbose_name='agreement_version'), ), migrations.AddField( model_name='agreementuser', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='agreement', name='latest_version', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='agreement_latest_version', to='assent.AgreementVersion'), ), migrations.AlterUniqueTogether( name='agreementuser', unique_together=set([('user', 'agreement_version')]), ), ]
/assent/models.py
# -*- coding: utf-8 -*- from django.conf import settings from django.core.urlresolvers import reverse from django.db import models from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from transcode.render import render class Agreement(models.Model): document_key = models.CharField( _('document key'), max_length=255, blank=False, default='', unique=True) slug = models.SlugField( _('slug'), max_length=255, blank=False, default='') description = models.CharField( _('description'), max_length=4096, blank=False, default='') short_description = models.CharField( _('short description'), max_length=255, blank=False, default='') latest_version = models.OneToOneField( 'assent.AgreementVersion', blank=True, null=True, related_name='agreement_latest_version') # Note, this should only be updated when we add a new version # This is currently done in AgreementVersion.save() date_modified = models.DateTimeField( default=timezone.now, blank=True, null=True, editable=False) class Meta: verbose_name = _('agreement') verbose_name_plural = _('agreements') def get_absolute_url(self): return reverse('assent:agreement_detail', kwargs={'slug': self.slug}) def __str__(self): return self.document_key class AgreementUser(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL) agreement_version = models.ForeignKey( to='assent.AgreementVersion', verbose_name=_('agreement_version'), related_name='agreement_users', blank=False) acceptance_date = models.DateTimeField( _('acceptance date'), blank=True, null=True) ip_address = models.GenericIPAddressField( _('IP address'), blank=True, null=True) class Meta: verbose_name = _('agreement user') verbose_name_plural = _('agreement users') unique_together = (('user', 'agreement_version', ), ) def __str__(self): return _('User: {0}, agreement: {1}').format( self.user, self.agreement_version) class AgreementVersion(models.Model): users = models.ManyToManyField( to=settings.AUTH_USER_MODEL, 
through='assent.AgreementUser', related_name='agreement_versions', verbose_name=_('users')) agreement = models.ForeignKey( to='assent.Agreement', verbose_name=_('agreement'), related_name='versions', blank=False) short_title = models.CharField( _('short title'), max_length=255, blank=False, default='') full_title = models.CharField( _('full title'), max_length=1023, blank=False, default='') content = models.TextField( _('content'), blank=True, default='') content_format = models.CharField( _('Content format'), max_length=4, blank=False, default='') release_date = models.DateTimeField( _('release date'), auto_now_add=True) class Meta: ordering = ('-release_date', ) get_latest_by = ('release_date', ) verbose_name = _('agreement version') verbose_name_plural = _('agreement versions') def __str__(self): return _('Agreement: "{0}" released: {1:%Y-%m-%d %H:%M}').format( self.agreement.document_key, self.release_date) def get_rendered_content(self): return render(self.content, self.content_format) def save(self, *args, **kwargs): super(AgreementVersion, self).save(*args, **kwargs) if self.pk is not None and self.agreement_id: self.agreement.latest_version = self self.agreement.date_modified = self.release_date self.agreement.save()
/assent/urls.py
# -*- coding: utf-8 -*- from django.conf.urls import url, include from .views import ( AgreementDetailView, AgreementListView, AgreementFormView, ) urlpatterns = [ url(r'^$', AgreementListView.as_view(), name='agreement_list'), url(r'^(?P<slug>[^/]+)/$', AgreementDetailView.as_view(), name='agreement_detail'), url(r'^(?P<slug>[^/]+)/accept/$', AgreementFormView.as_view(), name='agreement_form'), ]
/assent/views/__init__.py
# -*- coding: utf-8 -*- from .assent import *
/assent/views/assent.py
# -*- coding: utf-8 -*- from django.core.urlresolvers import reverse_lazy from django.contrib.auth.decorators import login_required from django.shortcuts import get_object_or_404 from django.utils import timezone from django.utils.decorators import method_decorator from django.utils.functional import cached_property from django.views.generic import DetailView, ListView from django.views.generic.detail import SingleObjectTemplateResponseMixin from django.views.generic.edit import ModelFormMixin, ProcessFormView from ..models import Agreement, AgreementUser from .forms import AgreementForm @method_decorator(login_required, name='dispatch') class AgreementListView(ListView): model = AgreementUser template_name = 'assent/agreement_list.html' def get_queryset(self): """ Override to use the user from the request as part of the query. """ queryset = super(AgreementListView, self).get_queryset() queryset = queryset.filter(user=self.request.user) return queryset class AgreementMixin(object): @cached_property def agreement(self): """ Returns the Agreement specified in the url """ slug = self.kwargs.get('slug', None) return get_object_or_404(Agreement, slug=slug) def get_object(self, queryset=None): agreement_version = self.agreement.latest_version if queryset is None: queryset = self.get_queryset() obj = queryset.filter( user=self.request.user, agreement_version=agreement_version).first() return obj def get_context_data(self, **kwargs): """ Ensures agreement and its latest_version are in the context. 
""" context = super(AgreementMixin, self).get_context_data(**kwargs) context['agreement'] = self.agreement context['agreement_version'] = self.agreement.latest_version return context @method_decorator(login_required, name='dispatch') class AgreementDetailView(AgreementMixin, DetailView): model = AgreementUser template_name = 'assent/agreement_detail.html' @method_decorator(login_required, name='dispatch') class AgreementFormView(AgreementMixin, SingleObjectTemplateResponseMixin, ModelFormMixin, ProcessFormView): """ This is essentially a CreateOrUpdateView. If the object dosn't exist, it is created. If it already does, then it is updated. """ model = AgreementUser template_name = 'assent/agreement_form.html' form_class = AgreementForm success_url = reverse_lazy('assent:agreement_list') def get(self, request, *args, **kwargs): self.object = self.get_object() return super(AgreementFormView, self).get(request, *args, **kwargs) def post(self, request, *args, **kwargs): self.object = self.get_object() return super(AgreementFormView, self).post(request, *args, **kwargs) @cached_property def client_ip_address(self): """ Attempts to return the user's IP address. :return: """ x_forwarded_for = self.request.META.get('HTTP_X_FORWARDED_FOR') if x_forwarded_for: ip = x_forwarded_for.split(',')[0] else: ip = self.request.META.get('REMOTE_ADDR') return ip def get_initial(self): initial = self.initial.copy() initial.update({ 'user': self.request.user, 'agreement_version': self.agreement.latest_version, 'acceptance_date': timezone.now(), 'ip_address': self.client_ip_address, }) return initial
/assent/views/forms.py
# -*- coding: utf-8 -*- from django import forms from ..models import AgreementUser class AgreementForm(forms.ModelForm): hidden_fields = ( 'user', 'agreement_version', 'ip_address', 'acceptance_date', ) class Meta: model = AgreementUser fields = ( 'user', 'agreement_version', 'ip_address', 'acceptance_date', ) def __init__(self, *args, **kwargs): """ Hides any fields listed in the class property: hidden_fields. """ super(AgreementForm, self).__init__(*args, **kwargs) for fld in self.hidden_fields: self.fields[fld].widget = forms.widgets.HiddenInput()
/tasks.py
# -*- coding: utf-8 -*- import os import sys from invoke import task BUILDDIR = "build" PROJECT = "assent" @task def clean(ctx): """Removes all the cache files""" ctx.run("find . -type d -name __pycache__ | xargs rm -rf") ctx.run('rm -rf ./.cache') builddir = os.path.join(__file__, BUILDDIR) if os.path.exists(builddir): print('Removing builddir {}'.format(builddir)) ctx.run('rm -rf {}'.format(builddir)) @task def install(ctx): """Installs the libraries required to run the application""" ctx.run("pip install -U pip") ctx.run("pip install -qr requirements/base.txt") @task(install) def develop(ctx): """Installs all the libraries used for development""" ctx.run("pip install -qr requirements/dev.txt") @task def checks(ctx): """Runs pep8/flake8 checks on the code""" excl = "--exclude='build/,*migrations/*'" ctx.run("pep8 {} .".format(excl)) ctx.run("flake8 {} .".format(excl)) @task(develop) def test(ctx): """Runs the tests""" ctx.run( 'PYTHONPATH=`pwd` ' "py.test --cov-config .coveragerc --cov-report html --cov-report term --cov={}".format( PROJECT ), pty=True ) if sys.platform == 'darwin': ctx.run('open {}/coverage/index.html'.format(BUILDDIR))
/tests/conftest.py
# -*- coding: utf-8 -*- import os import pytest from django import setup from django.utils import timezone from django.contrib.auth import get_user_model from assent.models import Agreement, AgreementUser, AgreementVersion def pytest_configure(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'example.settings') setup() @pytest.fixture def user(): obj = get_user_model().objects.create(email='test@test.com', username='test_user') return obj @pytest.fixture def agreement(): obj = Agreement.objects.create( document_key='test key', description='test description', short_description='test short description', latest_version=None, date_modified=None, ) return obj @pytest.fixture def agreement_version(agreement): obj = AgreementVersion.objects.create( agreement=agreement, short_title='test version', full_title='test version', content='test content', content_format='TEXT', release_date=None, ) agreement.latest_version = obj agreement.date_modified = obj.release_date agreement.save() return obj @pytest.fixture def agreement_user(agreement_version, user): acceptance_date = timezone.now() obj = AgreementUser.objects.create( user=user, agreement_version=agreement_version, acceptance_date=acceptance_date, ip_address='2001:db8:85a3:0:0:8a2e:370:7334' ) return obj
/tests/test_models.py
# -*- coding: utf-8 -*- import pytest @pytest.mark.django_db class TestAgreement: def test_str(self, agreement): assert str(agreement) == 'test key' @pytest.mark.django_db class TestAgreementVersion: def test_str(self, agreement_version): timestamp = agreement_version.release_date assert str(agreement_version) == 'Agreement: "test key" released: {0:%Y-%m-%d %H:%M}'.format(timestamp) def test_plain_text_render(self, agreement_version): assert agreement_version.get_rendered_content() == '<p>test content</p>' @pytest.mark.django_db class TestAgreementUser: def test_str(self, agreement_version, agreement_user, user): # Assign user to having signed the agreement assert str(agreement_user) == "User: {0}, agreement: {1}".format( user, agreement_version) def test_relationships(self, agreement_user, agreement_version, user): assert agreement_version in list(user.agreement_versions.all()) assert user in list(agreement_version.users.all())
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
luoyuan3316/mhc2flurry
refs/heads/master
{"/test/test_class2_neural_network.py": ["/mhc2flurry/allele_encoding_pair.py", "/mhc2flurry/testing_utils.py"]}
└── ├── downloads-generation │ ├── allele_sequences │ │ ├── assign_pdb_sequences_to_alpha_or_beta.py │ │ ├── extract_pdb_sequences.py │ │ ├── filter_sequences.py │ │ └── make_pseudosequences.py │ ├── data_curated │ │ ├── annotate_proteins.py │ │ ├── curate_ms_by_pmid.py │ │ └── curate_t_cell_epitopes.py │ ├── data_pdb │ │ ├── make_pdb_query.py │ │ └── parse_results.py │ └── data_proteomes │ └── index_fasta.py ├── mhc2flurry │ ├── __init__.py │ ├── allele_encoding_pair.py │ ├── downloads.py │ └── testing_utils.py └── test ├── test_class2_neural_network.py └── test_common.py
/downloads-generation/allele_sequences/assign_pdb_sequences_to_alpha_or_beta.py
# Assign PDB sequences (searched by mmseqs against IMGT sequences)
# to alpha vs beta based on mmseqs results
import argparse
import sys
import pandas
import os

from mhc2flurry.fasta import read_fasta_to_dataframe

parser = argparse.ArgumentParser()
parser.add_argument(
    "pdb_sequences",
    metavar="FASTA",
    help='PDB sequences')
parser.add_argument(
    "search_results",
    metavar="TXT",
    help='mmseqs search results')
parser.add_argument(
    "--mmseqs-output-format",
    metavar="A,B,C",
    required=True,
    help='mmseqs output format (comma separated list of fields)')
parser.add_argument(
    "--out-alpha",
    metavar="FASTA",
    help='Output file')
parser.add_argument(
    "--out-beta",
    metavar="FASTA",
    help='Output file')

args = parser.parse_args(sys.argv[1:])
print(args)

# All PDB chain sequences, indexed by fasta record id, so write_fasta can
# look up the sequence for each search hit.
sequences_df = read_fasta_to_dataframe(args.pdb_sequences).set_index("sequence_id")

# The mmseqs result table is headerless; column names come from the
# user-supplied output format. FIX: sep=None (delimiter sniffing) requires
# the python parsing engine -- the default C engine does not support it.
search_df = pandas.read_csv(
    args.search_results,
    names=args.mmseqs_output_format.split(","),
    sep=None,
    engine="python")

# Target ids are of the form "<alpha|beta>.<rest>"; the prefix is the chain kind.
search_df["kind"] = search_df.target.str.split(".").str.get(0)

# Keep confident hits only (query coverage > 0.7 and target coverage > 0.5),
# then for each query keep the single best hit (lowest e-value).
df = search_df.loc[
    (search_df.qcov > 0.7) & (search_df.tcov > 0.5)
].sort_values("evalue").drop_duplicates("query").set_index("query")
print(df)

print("Breakdown by kind [should be equal or nearly equal]")
print(df.kind.value_counts())


def write_fasta(filename, sub_df):
    """Write the sequences for the given subset of hits to a fasta file."""
    with open(filename, "w") as fd:
        for name, row in sub_df.iterrows():
            seq = sequences_df.loc[name].sequence
            fd.write(">pdb.%s\n" % name)
            fd.write(seq)
            fd.write("\n")
    print("Wrote", filename, "with", len(sub_df), "sequences")


if args.out_alpha:
    write_fasta(args.out_alpha, df.loc[df.kind == "alpha"])

if args.out_beta:
    write_fasta(args.out_beta, df.loc[df.kind == "beta"])
/downloads-generation/allele_sequences/extract_pdb_sequences.py
# Given a set of PDB .cif.gz files, write out a fasta with the sequences of
# each chain. This will be used to align MHC II PDB structures against
# sequences from IMGT and other sources.
import argparse
import sys
import json
import os
import glob

import atomium

parser = argparse.ArgumentParser()
parser.add_argument(
    "input",
    # FIX: this argument is a directory of structures, not a JSON file;
    # also corrected the "Director" typo in the help text.
    metavar="DIR",
    help='Directory of .cif.gz files')
parser.add_argument("out", metavar="FILE.fasta", help="Out fasta file")

args = parser.parse_args(sys.argv[1:])
print(args)

files = glob.glob(os.path.join(args.input, "*.cif.gz"))
print("Found %d files" % len(files))

with open(args.out, "w") as fd:
    for file in files:
        structure = atomium.open(file)
        for chain in structure.model.chains():
            # Record name is "<pdb code>_<chain id>", with the source
            # filename in the description for traceability.
            fd.write(">%s_%s %s\n" % (
                structure.code, chain.id, os.path.basename(file)))
            # One-letter residue codes concatenated into the chain sequence.
            fd.write("".join(c.code for c in chain.residues()))
            fd.write("\n")

print("Wrote: ", args.out)
/downloads-generation/allele_sequences/filter_sequences.py
""" Filter and combine class II sequence fastas. """ from __future__ import print_function import sys import argparse from mhc2flurry.common import normalize_allele_name import Bio.SeqIO # pylint: disable=import-error parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( "fastas", nargs="+", help="Unaligned fastas") parser.add_argument( "--kind", required=True, choices=("alpha", "beta"), help="Chain") parser.add_argument( "--out", required=True, help="Fasta output") min_lengths = { "alpha": 200, "beta": 200, } def run(): args = parser.parse_args(sys.argv[1:]) print(args) min_length = min_lengths[args.kind] output_records = [] seen = set() sequences = set() input_records = [] for fasta in args.fastas: reader = Bio.SeqIO.parse(fasta, "fasta") input_records.extend(reader) # Iterate longest records first so that when multiple records have the # same two digit normalized allele, we use the longest one. for record in sorted(input_records, key=lambda r: len(r.seq), reverse=True): original_name = record.description.split()[1] name = normalize_allele_name(original_name) if not name: print("Skipping due to parsing", original_name) continue if name in seen: continue if len(record.seq) < min_length: print("Skipping due to short length", name, record.description) continue seen.add(name) sequences.add(record.seq) record.id = "%s.%s" % (args.kind, record.id) record.description = "%s %s" % (name, record.description) output_records.append(record) with open(args.out, "w") as fd: Bio.SeqIO.write(output_records, fd, "fasta") print("Wrote %d / %d [%d unique] sequences: %s" % ( len(output_records), len(input_records), len(sequences), args.out)) if __name__ == '__main__': run()
/downloads-generation/allele_sequences/make_pseudosequences.py
""" Select allele sequences for pan-class II models by analyzing distances between each MHC residue and the peptide across a set of structures from PDB. """ from __future__ import print_function import sys import argparse import collections import os import operator import numpy import pandas import tqdm import atomium from mhc2flurry.fasta import read_fasta_to_dataframe parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( "alpha_aligned_fasta", metavar="FASTA", help="Aligned sequences") parser.add_argument( "beta_aligned_fasta", metavar="FASTA", help="Aligned sequences") parser.add_argument( "pdb_dir", metavar="DIR", help="Directory containing PDB structures") parser.add_argument( "--reference-allele", nargs=2, help="Alpha and beta alleles to use for position numbering.") parser.add_argument( "--reference-structure", action="append", default=[], help="Structures to write out with b-factors rewritten according to " "inclusion in pseudosequences(for visualization).") parser.add_argument( "--out-csv", help="Result file for sequences") parser.add_argument( "--out-aux-dir", help="Result DIR for extra information") parser.add_argument( "--cutoffs", default=[2.0, 4.0, 6.0, 8.0, 10.0], nargs="+", type=float, metavar="X", help="Cutoff distances to evaluate. Default: %(default)s.") parser.add_argument( "--criteria", nargs=3, type=float, action="append", default=[], required=True, metavar="X", help="Criteria for selecting a position. Triple of: min minor allele " "fraction, cutoff distance, fraction of structures with a contact at " "the given cutoff. May be specified any number of times.") parser.add_argument( "--peptide-chain-min-length", default=5, metavar="N", type=int, help="Default: %(default)s.") parser.add_argument( "--peptide-chain-max-length", default=50, metavar="N", type=int, help="Default: %(default)s.") parser.add_argument( "--subsample-pdb", metavar="N", type=int, help="Subsample to at most N PDB structures. 
For debugging.") def make_position_to_aligned_position_dict(aligned_sequence): result = {} position = 0 for (i, char) in enumerate(aligned_sequence): if char != "-": result[position] = i position += 1 return result def make_aligned_position_to_position_dict(aligned_sequence): result = {} position = 0 for (i, char) in enumerate(aligned_sequence): if char != "-": result[i] = position position += 1 return result def run(): args = parser.parse_args(sys.argv[1:]) print(args) alpha_aligned_df = read_fasta_to_dataframe( args.alpha_aligned_fasta, full_descriptions=True) alpha_aligned_df["kind"] = "alpha" beta_aligned_df = read_fasta_to_dataframe( args.beta_aligned_fasta, full_descriptions=True) beta_aligned_df["kind"] = "beta" aligned_df = pandas.concat( [alpha_aligned_df, beta_aligned_df], ignore_index=True) aligned_df["unaligned"] = aligned_df.sequence.str.replace("-", "") aligned_df = aligned_df.rename(columns={ "sequence": "aligned_sequence", }).set_index("sequence_id") non_pdb_aligned_df = aligned_df.loc[ ~aligned_df.index.str.startswith("pdb") ].copy() minor_allele_fraction_df = [] for kind, sub_df in non_pdb_aligned_df.groupby("kind"): print("Calculating minor allelic fractions: ", kind) (length,) = sub_df.aligned_sequence.str.len().unique() for pos in tqdm.tqdm(range(length)): s = sub_df.aligned_sequence.str.get(pos) mode = s.mode()[0] maf = (s != mode).mean() minor_allele_fraction_df.append((kind, pos, mode, maf)) minor_allele_fraction_df = pandas.DataFrame( minor_allele_fraction_df, columns=[ "mhc_chain_kind", "mhc_residue_aligned", "major_allele", "minor_allele_fraction", ]) minor_allele_fraction_df = minor_allele_fraction_df.set_index( ["mhc_chain_kind", "mhc_residue_aligned"]) print(minor_allele_fraction_df) pdb_aligned_df = aligned_df.loc[ aligned_df.index.str.startswith("pdb") ].copy() pdb_aligned_df["accession"] = pdb_aligned_df.index.str.split(".").str.get( 1).str.split("_").str.get(0) pdb_aligned_df["chain"] = 
pdb_aligned_df.index.str.split("_").str.get(-1) if args.subsample_pdb: keep_accessions = list( pandas.Series( pdb_aligned_df.accession.unique()).sample( n=args.subsample_pdb)) + args.reference_structure pdb_aligned_df = pdb_aligned_df.loc[ pdb_aligned_df.accession.isin(keep_accessions) ].copy() info_by_accession = {} contacts_df = [] for accession, sub_df in tqdm.tqdm( pdb_aligned_df.groupby("accession"), total=pdb_aligned_df.accession.nunique()): sub_df = sub_df.set_index("chain") alpha_chains = sub_df.loc[sub_df.kind == "alpha"].index.values beta_chains = sub_df.loc[sub_df.kind == "beta"].index.values mhc_chain_to_kind = {} for chain in alpha_chains: mhc_chain_to_kind[chain] = "alpha" for chain in beta_chains: mhc_chain_to_kind[chain] = "beta" if len(alpha_chains) != len(beta_chains): print( "Skipping", accession, "because num chains for alpha != beta", len(alpha_chains), len(beta_chains)) continue structure = atomium.open( os.path.join( args.pdb_dir, "%s.cif.gz" % accession)).model peptides = [ c for c in structure.chains() if len(c) >= args.peptide_chain_min_length and len(c) <= args.peptide_chain_max_length ] if len(peptides) == 0: print("Skipping", accession, "because no peptides") continue structure.optimise_distances() if accession in args.reference_structure: # Save for later info_by_accession[accession] = { "structure": structure, "peptides": peptides, "mhc_chain_to_kind": mhc_chain_to_kind, "aligned_df": sub_df.copy(), } mhc_chain_to_position_map = {} for chain in mhc_chain_to_kind: mhc_chain_to_position_map[chain] = make_position_to_aligned_position_dict( sub_df.loc[chain, "aligned_sequence"]) for peptide in peptides: seen = set() for cutoff in sorted(args.cutoffs): nearby = [ r for r in peptide.nearby_hets( cutoff=cutoff, residues=True, ligands=False) if r not in seen ] seen.update(nearby) for residue in nearby: kind = mhc_chain_to_kind.get(residue.chain.id) if kind is not None: index = residue.chain.residues().index(residue) row = 
sub_df.loc[residue.chain.id] numpy.testing.assert_equal( residue.code, row.unaligned[index]) aligned_position = ( mhc_chain_to_position_map[residue.chain.id][index]) numpy.testing.assert_equal( residue.code, row.aligned_sequence[aligned_position]) contacts_df.append(( accession, cutoff, peptide.id, residue.chain.id, kind, index, aligned_position, residue.code)) contacts_df = pandas.DataFrame( contacts_df, columns=[ "accession", "cutoff", "peptide_chain", "mhc_chain", "mhc_chain_kind", "mhc_residue_unaligned", "mhc_residue_aligned", "mhc_residue", ]) num_accessions = contacts_df.accession.nunique() positional_contact_rates_df = contacts_df.groupby( ["mhc_chain_kind", "mhc_residue_aligned", "cutoff"] ).accession.nunique().unstack().reindex( sorted(args.cutoffs), axis=1).fillna(0.0).cumsum(1) / num_accessions positional_df = minor_allele_fraction_df.merge( positional_contact_rates_df, how="left", left_index=True, right_index=True).fillna(0) # Criteria name -> alpha or beta -> list of positions criteria_to_positions = collections.OrderedDict() for (maf, cutoff, fraction) in args.criteria: name = "maf_%s_and_%s_within_%s_angstrom" % (maf, fraction, cutoff) positional_df[name] = ( (positional_df.minor_allele_fraction >= maf) & (positional_df[cutoff] >= fraction) ) positions = positional_df.loc[ positional_df[name] ].index.to_frame().reset_index(drop=True).groupby( "mhc_chain_kind" ).mhc_residue_aligned.unique().map(sorted).to_dict() criteria_to_positions[name] = positions print("Criteria", name, "selected:") for (k, v) in criteria_to_positions[name].items(): print(k, len(v)) pseudosequences_df = non_pdb_aligned_df.copy() for (criteria, d) in criteria_to_positions.items(): for kind in ["alpha", "beta"]: positions = d.get(kind, []) sub = pseudosequences_df.loc[ pseudosequences_df.kind == kind, ] pseudosequences_df.loc[ sub.index, criteria ] = sub.aligned_sequence.map( operator.itemgetter(*positions) ).map("".join).str.replace("-", "X") pseudosequences_df.index = 
pseudosequences_df.index.str.split().str.get(1) assert pseudosequences_df.index.value_counts().max() == 1 main_result_df = pseudosequences_df[ list(criteria_to_positions) + ["kind"] ].copy() main_result_df.to_csv(args.out_csv, index=True) print("Wrote %s: " % str(main_result_df.shape), args.out_csv) if args.out_aux_dir: if not os.path.exists(args.out_aux_dir): os.mkdir(args.out_aux_dir) filename = os.path.join(args.out_aux_dir, "aligned_sequences.csv") pseudosequences_df.to_csv(filename, index=True) print("Wrote: ", filename) filename = os.path.join(args.out_aux_dir, "contacts.csv") contacts_df.to_csv(filename, index=True) print("Wrote: ", filename) # Positional. We add reference allele position numbering and amino acids. if args.reference_allele: write_df = positional_df.copy() (alpha_reference, beta_reference) = args.reference_allele reference_name = "%s/%s" % (alpha_reference, beta_reference) reference_alleles = { "alpha": alpha_reference, "beta": beta_reference, } for kind in ["alpha", "beta"]: reference_allele = reference_alleles[kind] reference_sequence = pseudosequences_df.loc[ reference_allele, "aligned_sequence" ] position_map = make_aligned_position_to_position_dict( reference_sequence) write_df.loc[ kind, reference_name + " position" ] = write_df.loc[ kind ].index.map(position_map) write_df.loc[ kind, reference_name + " aa" ] = write_df.loc[ kind ].index.map(lambda pos: reference_sequence[pos]) filename = os.path.join(args.out_aux_dir, "positional.csv") write_df.to_csv(filename, index=True) print("Wrote: ", filename) # Reference structures # Write out reference structures with the "bvalue" atom property used # to indicate minor allele fractions / fraction of residues within a # given distance of the peptide / inclusion in pseudosequences. # This can be used to generate colored renderings showing these # properties, e.g. in pymol. # This "b-factor" hack is commonly used to store arbitrary user data # in a PDB file. 
There may be a better way for CIF files but I don't # know of one. for accession in args.reference_structure: positional_with_residues_df = positional_df.copy() positional_with_residues_df[ "residues" ] = positional_with_residues_df.index.map(lambda i: []) info = info_by_accession.get(accession) if not info: print("No info for reference structure", accession) continue structure = info['structure'] for chain, row in info['aligned_df'].iterrows(): position_map = make_position_to_aligned_position_dict( row.aligned_sequence) residues_df = pandas.DataFrame({ "residue": structure.chain(chain).residues(), }) residues_df["aligned_position"] = residues_df.index.map( position_map) for _, residue_row in residues_df.iterrows(): positional_with_residues_df.loc[ (row.kind, residue_row.aligned_position), "residues" ].append(residue_row.residue) positional_with_residues_df = positional_with_residues_df.loc[ positional_with_residues_df.residues.str.len() > 0 ] quantitative_columns = positional_with_residues_df.dtypes.loc[ (positional_with_residues_df.dtypes == float) | (positional_with_residues_df.dtypes == bool) ].index for atom in structure.atoms(): atom.bvalue = 0 for col in quantitative_columns: # Assign bfactors based on the particular column. for _, row in positional_with_residues_df.iterrows(): for residue in row.residues: for atom in residue.atoms(): atom.bvalue = float(row[col]) * 100.0 # Write out the file with modified bvalues. filename = os.path.join( args.out_aux_dir, "%s.%s.cif" % (accession, col)) structure.save(filename) print("Wrote:", filename) if __name__ == '__main__': run()
/downloads-generation/data_curated/annotate_proteins.py
""" Given a CSV where some column indicates peptides, add a column indicating which protein(s) from some specified proteome contain that peptide. """ import argparse import time import sys import tqdm import pandas import numpy import shellinford from mhc2flurry.fasta import read_fasta_to_dataframe parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( "reference", metavar="FASTA", help="Fasta proteome to search.") parser.add_argument( "--annotate", action="append", default=[], nargs=2, metavar="CSV", help="Input and output file pairs. Specify this argument multiple times " "to process multiple input files, each of which will be written to its " "respective output file. The output file can be specified as '-' to " "overwrite the input file.") parser.add_argument( "--peptide-column", default="peptide", help="Name of column that gives peptides. Default: %(default)s") parser.add_argument( "--protein-column", default="proteins", help="Name of column to write proteins. Default: %(default)s") parser.add_argument( "--full-descriptions", default=False, action="store_true", help="Write the full protein descriptions, not just the IDs.") parser.add_argument( "--join-character", default=" ", help="Separator to use between protein names. 
Default: '%(default)s'") parser.add_argument( "--fm-index-suffix", metavar="SUFFIX", help="Use a pre-existing fm index found by concatenating SUFFIX onto each " "input fasta filename.") def run(): args = parser.parse_args(sys.argv[1:]) peptides = set() input_filename_df_and_output_filename = [] for (input, output) in args.annotate: if output.strip() == "-": output = input df = pandas.read_csv(input) print("Read peptides", input) print(df) input_filename_df_and_output_filename.append((input, df, output)) peptides.update(df[args.peptide_column].unique()) print("Read %d peptides to annotate" % len(peptides)) proteome_df = read_fasta_to_dataframe( args.reference, full_descriptions=args.full_descriptions) print("Read proteome:") print(proteome_df) fm = shellinford.FMIndex() start = time.time() if args.fm_index_suffix: name = args.reference + args.fm_index_suffix print("Using pre-existing fm index", name) fm.read(name) print("Read in %0.3f sec." % (time.time() - start)) else: print("Building FM index") fm.build(proteome_df.sequence.tolist()) print("Built index of %d sequences in %0.3f sec." % ( len(proteome_df), time.time() - start)) print("Annotating peptides") peptide_to_matches = {} for peptide in tqdm.tqdm(peptides): matches = [item.doc_id for item in fm.search(peptide)] names = args.join_character.join( proteome_df.loc[matches, "sequence_id"].values) peptide_to_matches[peptide] = names print("Writing files") for (input, df, output) in input_filename_df_and_output_filename: print(input) df[args.protein_column] = df[args.peptide_column].map( peptide_to_matches) df.to_csv(output, index=False) print("Wrote", output) if __name__ == '__main__': run()
/downloads-generation/data_curated/curate_ms_by_pmid.py
""" Filter and combine various peptide/MHC datasets to derive a composite training set, optionally including eluted peptides identified by mass-spec. The handle_pmid_XXXX functions should return a DataFrame with columns: - peptide - sample_id - hla [space separated list of alleles] - pulldown_antibody - format [monoallelic, multiallelic, DR-specific] - mhc_class [should be II] - sample type [an expression group, e.g. "spleen" or "expi293"] - cell_line [for samples deriving from a single known cell line] """ import sys import argparse import os import json import collections from six.moves import StringIO from mhc2flurry.common import normalize_allele_name import pandas parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( "--ms-item", nargs="+", action="append", metavar="PMID FILE, ... FILE", default=[], help="Mass spec item to curate: PMID and list of files") parser.add_argument( "--expression-item", nargs="+", action="append", metavar="LABEL FILE, ... FILE", default=[], help="Expression data to curate: dataset label and list of files") parser.add_argument( "--ms-out", metavar="OUT.csv", help="Out file path (MS data)") parser.add_argument( "--expression-out", metavar="OUT.csv", help="Out file path (RNA-seq expression)") parser.add_argument( "--expression-metadata-out", metavar="OUT.csv", help="Out file path for expression metadata, i.e. 
which samples used") parser.add_argument( "--debug", action="store_true", default=False, help="Leave user in pdb if PMID is unsupported") PMID_HANDLERS = {} EXPRESSION_HANDLERS = {} def load(filenames, **kwargs): result = {} for filename in filenames: if filename.endswith(".csv"): result[filename] = pandas.read_csv(filename, **kwargs) elif filename.endswith(".xlsx") or filename.endswith(".xls"): result[filename] = pandas.read_excel(filename, **kwargs) else: result[filename] = filename return result def debug(*filenames): loaded = load(filenames) import ipdb ipdb.set_trace() PMID_31495665_SAMPLE_TYPES = { "HLA-DR_A375": "a375", "HLA-DR_Lung": "lung", "HLA-DR_PBMC_HDSC": "pbmc", "HLA-DR_PBMC_RG1095": "pbmc", "HLA-DR_PBMC_RG1104": "pbmc", "HLA-DR_PBMC_RG1248": "pbmc", "HLA-DR_SILAC_Donor1_10minLysate": "pbmc", "HLA-DR_SILAC_Donor1_5hrLysate": "pbmc", "HLA-DR_SILAC_Donor1_DConly": "pbmc", "HLA-DR_SILAC_Donor1_UVovernight": "pbmc", "HLA-DR_SILAC_Donor2_DC_UV_16hr": "pbmc", "HLA-DR_SILAC_Donor2_DC_UV_24hr": "pbmc", "HLA-DR_Spleen": "spleen", "MAPTAC_A*02:01": "mix:a375,expi293,hek293,hela", "MAPTAC_A*11:01": "mix:expi293,hela", "MAPTAC_A*32:01": "mix:a375,expi293,hela", "MAPTAC_B*07:02": "mix:a375,expi293,hela", "MAPTAC_B*45:01": "expi293", "MAPTAC_B*52:01": "mix:a375,expi293", "MAPTAC_C*03:03": "expi293", "MAPTAC_C*06:02": "mix:a375,expi293", "MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "expi293", "MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "expi293", "MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "expi293", "MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "expi293", "MAPTAC_DRB1*01:01": "mix:a375,b721,expi293,kg1,k562", "MAPTAC_DRB1*03:01": "expi293", "MAPTAC_DRB1*04:01": "expi293", "MAPTAC_DRB1*07:01": "mix:expi293,hek293", "MAPTAC_DRB1*11:01": "mix:expi293,k562,kg1", "MAPTAC_DRB1*12:01_dm+": "expi293", "MAPTAC_DRB1*12:01_dm-": "expi293", "MAPTAC_DRB1*15:01": "expi293", "MAPTAC_DRB3*01:01_dm+": "expi293", "MAPTAC_DRB3*01:01_dm-": "expi293", } CELL_LINE_MIXTURES = sorted( set( x for x in 
PMID_31495665_SAMPLE_TYPES.values() if x.startswith("mix:"))) def handle_pmid_25502872(filename): """Bergseng, ..., Sollid. Immunogenetics 2015 [PMID 25502872]""" return None def handle_pmid_26495903(*filenames): """Sofron, ..., Fugmann. Eur. J. Immunol. 2015 [PMID 26495903]""" return None def handle_pmid_26740625(*filenames): """Clement, ..., Santambrogio. J. Biol. Chem. 2016 [PMID 26740625]""" # Mouse with transgenic DRB*01:01, collected about 3,000 peptides. # Peptides are mouse-derived, MHC II is human. return None def handle_pmid_27452731(*filenames): """Heyder, ..., Ytterberg. Mol. Cell. Proteomics 2016 [PMID 27452731]""" return None def handle_pmid_27726376(*filenames): """Wang, ..., Costello. J. Proteom. Res. 2017""" return None def handle_pmid_28329770(*filenames): """Khodadoust, ..., Alizadeh. Nature 2017 [PMID 28329770]""" return None def handle_pmid_28467828(filename): """Ooi, ..., Kitching. Nature 2017 [PMID 28467828]""" return None def handle_pmid_29314611(filename): """Ritz, ..., Fugmann. 
Proteomics 2018 [PMID 29314611]""" hla_types = { "MAVER-1": "DRB1*01:01 DRB1*13:01 DRB3*02:02 DQA1*01:01 DQB1*05:01 DQA1*01:03 DQB1*06:03", "DOHH2": "DRB1*01:01 DRB1*15:01 DRB5*01:01 DQA1*01:01 DQB1*05:01 DQB1*06:02 DQA1*01:02", } pulldown_antibody = { "DR": "L243 (HLA-DR)", "DQ": "SPVL3 (HLA-DQ)", } format = { "DR": "DR-specific", "DQ": "DQ-specific", } result_dfs = [] dfs = pandas.read_excel( filename, sheet_name=None, skiprows=1, index_col="Sequence") for (label, df) in dfs.items(): label = label.upper() (cell_line, restriction) = label.split("_") result_df = pandas.DataFrame({"peptide": df.index.values}) result_df["sample_id"] = label result_df["cell_line"] = cell_line result_df["sample_type"] = "B-CELL" result_df["mhc_class"] = "II" result_df["hla"] = hla_types[cell_line] result_df["pulldown_antibody"] = pulldown_antibody[restriction] result_df["format"] = format[restriction] result_dfs.append(result_df) result_df = pandas.concat(result_dfs, ignore_index=True) return result_df def handle_pmid_29317506(*filenames): """Ting, ..., Rossjohn. J. Biol. Chem. 2018 [PMID 29317506]""" return None def handle_pmid_29632711(*filenames): """Nelde, ..., Walz. Oncoimmunology 2018 [PMID 29632711]""" return None def handle_pmid_31495665(filename): """Abelin, ..., Rooney Immunity 2019 [PMID 31495665]""" hla_type = { "HLA-DR_A375": "DRB1*07:01 DRB4*01:01 DRB1*04:05", "HLA-DR_Lung": "DRB1*01:01 DRB1*03:01 DRB3*01:01", "HLA-DR_PBMC_HDSC": "DRB1*03:01 DRB1*11:01 DRB3*01:01 DRB3*02:02", "HLA-DR_PBMC_RG1095": "DRB1*03:01 DRB1*11:01 DRB3*01:01 DRB3*02:02", "HLA-DR_PBMC_RG1104": "DRB1*01:01 DRB1*11:01 DRB3*02:02", "HLA-DR_PBMC_RG1248": "DRB1*03:01 DRB1*03:01 DRB3*01:01 DRB3*01:01", # Note: the paper and Data S1 are pretty confusing regarding the donor1 # and donor2 SILAC experiments. These HLA types are a best guess but # I am not 100% confident. 
"HLA-DR_SILAC_Donor1_10minLysate": "DRB1*07:01 DRB4*01:01", "HLA-DR_SILAC_Donor1_5hrLysate": "DRB1*07:01 DRB4*01:01", "HLA-DR_SILAC_Donor1_DConly": "DRB1*07:01 DRB4*01:01", "HLA-DR_SILAC_Donor1_UVovernight": "DRB1*07:01 DRB4*01:01", "HLA-DR_SILAC_Donor2_DC_UV_16hr": "DRB1*04:01 DRB4*01:03 DRB1*15:03 DRB5*01:01 DQB1*03:02 DQA1*01:02 DQB1*06:02 DQA1*03:01 DPB1*02:01 DPA1*01:03 DPB1*04:01", "HLA-DR_SILAC_Donor2_DC_UV_24hr": "DRB1*04:01 DRB4*01:03 DRB1*15:03 DRB5*01:01 DQB1*03:02 DQA1*01:02 DQB1*06:02 DQA1*03:01 DPB1*02:01 DPA1*01:03 DPB1*04:01", "HLA-DR_Spleen": "DRB1*04:01 DRB4*01:03 DRB1*15:03 DRB5*01:01", "MAPTAC_A*02:01": "HLA-A*02:01", "MAPTAC_A*11:01": "HLA-A*11:01", "MAPTAC_A*32:01": "HLA-A*32:01", "MAPTAC_B*07:02": "HLA-B*07:02", "MAPTAC_B*45:01": "HLA-B*45:01", "MAPTAC_B*52:01": "HLA-B*52:01", "MAPTAC_C*03:03": "HLA-C*03:03", "MAPTAC_C*06:02": "HLA-C*06:02", "MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "DPA1*01:03 DPB1*06:01", "MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "DPA1*01:03 DPB1*06:01", "MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "DQA1*01:02 DQB1*06:04", "MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "DQA1*01:02 DQB1*06:04", "MAPTAC_DRB1*01:01": "DRB1*01:01", "MAPTAC_DRB1*03:01": "DRB1*03:01", "MAPTAC_DRB1*04:01": "DRB1*04:01", "MAPTAC_DRB1*07:01": "DRB1*07:01", "MAPTAC_DRB1*11:01": "DRB1*11:01", "MAPTAC_DRB1*12:01_dm+": "DRB1*12:01", "MAPTAC_DRB1*12:01_dm-": "DRB1*12:01", "MAPTAC_DRB1*15:01": "DRB1*15:01", "MAPTAC_DRB3*01:01_dm+": "DRB3*01:01", "MAPTAC_DRB3*01:01_dm-": "DRB3*01:01", } pulldown_antibody = { "HLA-DR_A375": "L243+tal1b5 (HLA-DR)", "HLA-DR_Lung": "L243 (HLA-DR)", "HLA-DR_PBMC_HDSC": "tal1b5 (HLA-DR)", "HLA-DR_PBMC_RG1095": "tal1b5 (HLA-DR)", "HLA-DR_PBMC_RG1104": "tal1b5 (HLA-DR)", "HLA-DR_PBMC_RG1248": "tal1b5 (HLA-DR)", "HLA-DR_SILAC_Donor1_10minLysate": "L243 (HLA-DR)", "HLA-DR_SILAC_Donor1_5hrLysate": "L243 (HLA-DR)", "HLA-DR_SILAC_Donor1_DConly": "L243 (HLA-DR)", "HLA-DR_SILAC_Donor1_UVovernight": "L243 (HLA-DR)", "HLA-DR_SILAC_Donor2_DC_UV_16hr": "L243 (HLA-DR)", 
"HLA-DR_SILAC_Donor2_DC_UV_24hr": "L243 (HLA-DR)", "HLA-DR_Spleen": "L243 (HLA-DR)", "MAPTAC_A*02:01": "MAPTAC", "MAPTAC_A*11:01": "MAPTAC", "MAPTAC_A*32:01": "MAPTAC", "MAPTAC_B*07:02": "MAPTAC", "MAPTAC_B*45:01": "MAPTAC", "MAPTAC_B*52:01": "MAPTAC", "MAPTAC_C*03:03": "MAPTAC", "MAPTAC_C*06:02": "MAPTAC", "MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "MAPTAC", "MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "MAPTAC", "MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "MAPTAC", "MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "MAPTAC", "MAPTAC_DRB1*01:01": "MAPTAC", "MAPTAC_DRB1*03:01": "MAPTAC", "MAPTAC_DRB1*04:01": "MAPTAC", "MAPTAC_DRB1*07:01": "MAPTAC", "MAPTAC_DRB1*11:01": "MAPTAC", "MAPTAC_DRB1*12:01_dm+": "MAPTAC", "MAPTAC_DRB1*12:01_dm-": "MAPTAC", "MAPTAC_DRB1*15:01": "MAPTAC", "MAPTAC_DRB3*01:01_dm+": "MAPTAC", "MAPTAC_DRB3*01:01_dm-": "MAPTAC", } format = { "HLA-DR_A375": "DR-specific", "HLA-DR_Lung": "DR-specific", "HLA-DR_PBMC_HDSC": "DR-specific", "HLA-DR_PBMC_RG1095": "DR-specific", "HLA-DR_PBMC_RG1104": "DR-specific", "HLA-DR_PBMC_RG1248": "DR-specific", "HLA-DR_SILAC_Donor1_10minLysate": "DR-specific", "HLA-DR_SILAC_Donor1_5hrLysate": "DR-specific", "HLA-DR_SILAC_Donor1_DConly": "DR-specific", "HLA-DR_SILAC_Donor1_UVovernight": "DR-specific", "HLA-DR_SILAC_Donor2_DC_UV_16hr": "DR-specific", "HLA-DR_SILAC_Donor2_DC_UV_24hr": "DR-specific", "HLA-DR_Spleen": "DR-specific", "MAPTAC_A*02:01": "monoallelic", "MAPTAC_A*11:01": "monoallelic", "MAPTAC_A*32:01": "monoallelic", "MAPTAC_B*07:02": "monoallelic", "MAPTAC_B*45:01": "monoallelic", "MAPTAC_B*52:01": "monoallelic", "MAPTAC_C*03:03": "monoallelic", "MAPTAC_C*06:02": "monoallelic", "MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "monoallelic", "MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "monoallelic", "MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "monoallelic", "MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "monoallelic", "MAPTAC_DRB1*01:01": "monoallelic", "MAPTAC_DRB1*03:01": "monoallelic", "MAPTAC_DRB1*04:01": "monoallelic", "MAPTAC_DRB1*07:01": "monoallelic", "MAPTAC_DRB1*11:01": 
"monoallelic", "MAPTAC_DRB1*12:01_dm+": "monoallelic", "MAPTAC_DRB1*12:01_dm-": "monoallelic", "MAPTAC_DRB1*15:01": "monoallelic", "MAPTAC_DRB3*01:01_dm+": "monoallelic", "MAPTAC_DRB3*01:01_dm-": "monoallelic", } mhc_class = { "HLA-DR_A375": "II", "HLA-DR_Lung": "II", "HLA-DR_PBMC_HDSC": "II", "HLA-DR_PBMC_RG1095": "II", "HLA-DR_PBMC_RG1104": "II", "HLA-DR_PBMC_RG1248": "II", "HLA-DR_SILAC_Donor1_10minLysate": "II", "HLA-DR_SILAC_Donor1_5hrLysate": "II", "HLA-DR_SILAC_Donor1_DConly": "II", "HLA-DR_SILAC_Donor1_UVovernight": "II", "HLA-DR_SILAC_Donor2_DC_UV_16hr": "II", "HLA-DR_SILAC_Donor2_DC_UV_24hr": "II", "HLA-DR_Spleen": "II", "MAPTAC_A*02:01": "I", "MAPTAC_A*11:01": "I", "MAPTAC_A*32:01": "I", "MAPTAC_B*07:02": "I", "MAPTAC_B*45:01": "I", "MAPTAC_B*52:01": "I", "MAPTAC_C*03:03": "I", "MAPTAC_C*06:02": "I", "MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "II", "MAPTAC_DPB1*06:01/DPA1*01:03_dm-": "II", "MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "II", "MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "II", "MAPTAC_DRB1*01:01": "II", "MAPTAC_DRB1*03:01": "II", "MAPTAC_DRB1*04:01": "II", "MAPTAC_DRB1*07:01": "II", "MAPTAC_DRB1*11:01": "II", "MAPTAC_DRB1*12:01_dm+": "II", "MAPTAC_DRB1*12:01_dm-": "II", "MAPTAC_DRB1*15:01": "II", "MAPTAC_DRB3*01:01_dm+": "II", "MAPTAC_DRB3*01:01_dm-": "II", } cell_line = { "HLA-DR_A375": "A375", "HLA-DR_Lung": "", "HLA-DR_PBMC_HDSC": "", "HLA-DR_PBMC_RG1095": "", "HLA-DR_PBMC_RG1104": "", "HLA-DR_PBMC_RG1248": "", "HLA-DR_SILAC_Donor1_10minLysate": "", "HLA-DR_SILAC_Donor1_5hrLysate": "", "HLA-DR_SILAC_Donor1_DConly": "", "HLA-DR_SILAC_Donor1_UVovernight": "", "HLA-DR_SILAC_Donor2_DC_UV_16hr": "", "HLA-DR_SILAC_Donor2_DC_UV_24hr": "", "HLA-DR_Spleen": "L243 (HLA-DR)", "HLA-DR_Spleen": "", "MAPTAC_A*02:01": "", "MAPTAC_A*11:01": "", "MAPTAC_A*32:01": "", "MAPTAC_B*07:02": "", "MAPTAC_B*45:01": "expi293", "MAPTAC_B*52:01": "", "MAPTAC_C*03:03": "expi293", "MAPTAC_C*06:02": "", "MAPTAC_DPB1*06:01/DPA1*01:03_dm+": "expi293", "MAPTAC_DPB1*06:01/DPA1*01:03_dm-": 
"expi293",
        # NOTE(review): per the original comment, this sample does not appear
        # in DataS1A — verify against the supplementary spreadsheet.
        "MAPTAC_DQB1*06:04/DQA1*01:02_dm+": "expi293",  # don't actually see this in DataS1A!
        "MAPTAC_DQB1*06:04/DQA1*01:02_dm-": "expi293",
        "MAPTAC_DRB1*01:01": "",
        "MAPTAC_DRB1*03:01": "expi293",
        "MAPTAC_DRB1*04:01": "expi293",
        "MAPTAC_DRB1*07:01": "",
        "MAPTAC_DRB1*11:01": "",
        "MAPTAC_DRB1*12:01_dm+": "expi293",
        "MAPTAC_DRB1*12:01_dm-": "expi293",
        "MAPTAC_DRB1*15:01": "expi293",
        "MAPTAC_DRB3*01:01_dm+": "expi293",
        "MAPTAC_DRB3*01:01_dm-": "expi293",
    }
    # Peptides are one column per sample in sheet DataS1B; a sample whose
    # HLA typing is None (in the hla_type dict above) is skipped on purpose.
    df = pandas.read_excel(filename, sheet_name="DataS1B")
    results = []
    for sample_id in df.columns:
        if hla_type[sample_id] is None:
            print("Intentionally skipping", sample_id)
            continue
        result_df = pandas.DataFrame({
            "peptide": df[sample_id].dropna().values,
        })
        result_df["sample_id"] = sample_id
        result_df["hla"] = hla_type[sample_id]
        result_df["pulldown_antibody"] = pulldown_antibody[sample_id]
        result_df["format"] = format[sample_id]
        result_df["mhc_class"] = mhc_class[sample_id]
        result_df["sample_type"] = PMID_31495665_SAMPLE_TYPES[sample_id]
        result_df["cell_line"] = cell_line[sample_id]
        results.append(result_df)
    result_df = pandas.concat(results, ignore_index=True)
    # Only class II ligands are kept for this curation.
    result_df = result_df.loc[
        result_df.mhc_class == "II"
    ]
    return result_df


def handle_pmid_31611696(data_s1_filename, data_s2_filename):
    """Racle, ..., Gfeller. Nature Biotechnology 2019 [PMID 31611696]

    Parses the two supplementary peptide-intensity tables (Data S1 and
    Data S2, one column per sample keyed by "Intensity ..." headers) and
    returns a tidy DataFrame with columns: peptide, sample_id, hla,
    pulldown_antibody, format, mhc_class, sample_type, cell_line.
    """
    data_s1 = pandas.read_csv(
        data_s1_filename, sep=None, engine="python").set_index("Sequence")
    data_s2 = pandas.read_csv(
        data_s2_filename, sep=None, engine="python").set_index("Sequence")

    # HLA typing is given as a PDF in Supplementary Table 1.
    # In cases of ambiguous assignment we use the primary assignment.
    # Each row below: sample identifier, sample kind, then the typed alleles.
    text = """
3808_HMC MENINGIOMA DRB1*03:01 DRB1*07:01 DRB3*01:01 DRB4*01:01 DPA1*01:03 DPA1*02:01 DPB1*03:01 DPB1*11:01 DQA1*02:01 DQA1*05:01 DQB1*02:01 DQB1*02:02
3830_NJF MENINGIOMA DRB1*04:04 DRB1*11:01 DRB3*02:02 DRB4*01:03 DPA1*01:03 DPB1*02:01 DPB1*06:01 DQA1*03:01 DQA1*05:05 DQB1*03:01 DQB1*03:02
3849BR MENINGIOMA DRB1*11:04 DRB3*02:02 DPA1*01:03 DPB1*02:01 DPB1*04:01 DQA1*05:05 DQB1*03:01
3865_DM MENINGIOMA DRB1*01:01 DRB1*07:01 DRB4*01:03 DPA1*01:03 DPB1*04:01 DPB1*20:01 DQA1*01:01 DQA1*02:01 DQB1*03:03 DQB1*05:01
3869_GA MENINGIOMA DRB1*01:03 DRB1*04:04 DRB4*01:03 DPA1*01:03 DPB1*04:01 DPB1*126:01 DQA1*03:01 DQA1*05:05 DQB1*03:01 DQB1*03:02
3911_ME MENINGIOMA DRB1*11:01 DRB3*02:02 DPA1*01:03 DPB1*04:01 DQA1*05:05 DQB1*03:01
3912_BAM MENINGIOMA DRB1*03:01 DRB1*04:01 DRB3*01:01 DRB4*01:03 DPA1*01:03 DPB1*04:01 DQA1*03:01 DQA1*05:01 DQB1*02:01 DQB1*03:02
3947_GA MENINGIOMA DRB1*01:01 DRB1*13:01 DRB3*01:01 DPA1*01:03 DPB1*02:01 DPB1*04:02 DQA1*01:01 DQA1*01:03 DQB1*05:01 DQB1*06:03
3971_ORA MENINGIOMA DRB1*13:03 DRB1*07:01 DRB3*01:01 DRB4*01:01 DPA1*01:03 DPA1*02:02 DPB1*04:01 DQA1*02:01 DQA1*05:05 DQB1*02:02 DQB1*03:01
3993 MENINGIOMA DRB1*07:01 DRB1*15:01 DRB4*01:03 DRB5*01:01 DPA1*01:03 DPA1*02:01 DPB1*04:01 DPB1*17:01 DQA1*01:02 DQA1*02:01 DQB1*02:02 DQB1*06:02
4001 MENINGIOMA DRB1*13:01 DRB1*14:01 DRB3*01:01 DRB3*02:02 DPA1*01:03 DPB1*04:01 DPB1*04:02 DQA1*01:03 DQA1*01:04 DQB1*05:03 DQB1*06:03
4021 MENINGIOMA DRB1*11:01 DRB1*04:05 DRB3*02:02 DRB4*01:03 DPA1*01:03 DPB1*03:01 DPB1*104:01 DQA1*03:03 DQA1*05:05 DQB1*02:02 DQB1*03:01
4037_DC MENINGIOMA DRB1*01:01 DPA1*01:03 DPB1*04:01 DPB1*06:01 DQA1*01:01 DQB1*05:01
4052_BA MENINGIOMA DRB1*03:01 DRB1*11:04 DRB3*01:01 DRB3*02:02 DPA1*01:03 DPB1*04:01 DQA1*05:01 DQA1*05:05 DQB1*02:01 DQB1*03:01
BP455 B-CELL DRB1*10:01 DRB1*13:01 DRB3*01:01 DPA1*01:03 DPB1*02:01 DQA1*01:05 DQA1*01:10 DQB1*05:01 DQB1*06:03
CD165 B-CELL DRB1*11:01 DRB3*02:02 DPA1*01:03 DPB1*04:01 DPB1*04:02 DQA1*05:05 DQB1*03:01
CM647 B-CELL DRB1*07:01 DRB1*16:01 DRB4*01:03 DRB5*02:02 DPA1*01:03 DPB1*02:01 DPB1*23:01 DQA1*01:02 DQA1*02:01 DQB1*02:02 DQB1*05:02
GD149 B-CELL DRB1*07:01 DRB1*13:01 DRB3*01:01 DRB4*01:01 DPA1*01:03 DPA1*02:01 DPB1*03:01 DPB1*04:01 DQA1*01:10 DQA1*02:01 DQB1*02:02 DQB1*06:03
JY B-CELL DRB1*04:04 DRB1*13:01 DRB3*01:01 DRB4*01:03 DPA1*01:03 DPB1*02:01 DPB1*04:01 DQA1*01:03 DQA1*03:01 DQB1*03:02 DQB1*06:03
PD42 B-CELL DRB1*01:02 DRB1*15:01 DRB5*01:01 DPA1*01:03 DPA1*02:02 DPB1*04:01 DPB1*05:01 DQA1*01:01 DQA1*01:02 DQB1*05:01 DQB1*06:02
RA957 B-CELL DRB1*04:01 DRB1*08:01 DRB4*01:03 DPA1*01:03 DPB1*04:01 DPB1*04:02 DQA1*03:03 DQA1*04:01 DQB1*03:01 DQB1*04:02
TIL1 TIL DRB1*01:01 DRB1*04:08 DRB4*01:03 DPA1*01:03 DPB1*02:01 DPB1*04:01 DQA1*01:01 DQA1*03:03 DQB1*03:01 DQB1*05:01
TIL3 TIL DRB1*12:01 DRB1*15:01 DRB3*02:02 DRB5*01:01 DPA1*01:03 DPB1*03:01 DPB1*04:01 DQA1*01:02 DQA1*05:05 DQB1*03:01 DQB1*05:02
    """
    rows = [
        row.split() for row in text.strip().split("\n")
    ]
    # Underscores in the sample identifiers become dashes ("kind" below).
    rows = [
        (row[0].replace("_", "-"), row[1], " ".join(row[2:]))
        for row in rows
    ]
    info_df = pandas.DataFrame(rows, columns=["kind", "sample_type", "hla"])
    info_df = info_df.set_index("kind")

    # Data S1: keep only the "Intensity ..." columns and strip the prefix
    # (and any "_II" suffix) so column names match the typing table.
    renames = {
        c: c.replace("Intensity", "").replace("_II", "").strip()
        for c in data_s1.columns
        if c.startswith("Intensity")
    }
    # NOTE(review): "3865DM" appears twice in this dict literal; duplicate
    # keys collapse silently (last one wins), so one entry is redundant.
    data_s1 = data_s1[sorted(renames)].rename(columns=renames).rename(columns={
        "3830NJF": "3830-NJF",
        "3865DM": "3865-DM",
        "3912BAM": "3912-BAM",
        "3865DM": "3865-DM",
        "CD165_ IFNg": "CD165_IFNg",
    })
    # Long format: one row per (peptide, sample) with a positive intensity.
    result1_df = data_s1.stack().reset_index()
    result1_df.columns = ["peptide", "sample_id", "intensity"]
    result1_df = result1_df.loc[result1_df.intensity > 0]
    # Map sample ids onto the typing-table keys ("kind").
    result1_df["kind"] = result1_df.sample_id.map(lambda s: {
        "JY_DR": "JY",
        "CD165_IFNg": "CD165",
    }.get(s, s))
    result1_df["hla"] = result1_df.kind.map(info_df.hla)
    result1_df["pulldown_antibody"] = "HB145"
    result1_df["format"] = "MULTIALLELIC"
    result1_df.loc[
        result1_df.sample_id == "JY_DR", "format"
    ] = "DR-specific"
    result1_df["mhc_class"] = "II"
    result1_df["sample_type"] = result1_df.kind.map(info_df.sample_type)
    # Only B-cell samples correspond to named cell lines.
    result1_df["cell_line"] = [
        row.kind if row.sample_type == "B-CELL" else ""
        for _, row in result1_df.iterrows()
    ]
    del result1_df["kind"]

    # Data S2: same column normalization as Data S1.
    renames = {
        c: c.replace("Intensity", "").replace("_II", "").strip()
        for c in data_s2.columns
        if c.startswith("Intensity")
    }
    data_s2 = data_s2[sorted(renames)].rename(columns=renames).rename(columns={
        "3830NJF": "3830-NJF",
        "3865DM": "3865-DM",
        "3912BAM": "3912-BAM",
        "3865DM": "3865-DM",
        "CD165_ IFNg": "CD165_IFNg",
    })
    # NOTE(review): unlike Data S1 above, no "intensity > 0" filter is
    # applied here — confirm whether that asymmetry is intentional.
    result2_df = data_s2.stack().reset_index()
    result2_df.columns = ["peptide", "sample_id", "intensity"]
    result2_df["kind"] = result2_df.sample_id.str.replace(
        "-HLA-DR", "").str.replace("-depleted", "").str.replace("_", "-")
    result2_df["hla"] = result2_df.kind.map(info_df.hla)
    result2_df["pulldown_antibody"] = ""
    # Every Data S2 sample must be either DR-depleted or DR-specific.
    assert all(result2_df.sample_id.map(
        lambda s: s.endswith("DR-depleted") or s.endswith("-DR")))
    result2_df["format"] = result2_df.sample_id.map(
        lambda s: "DR-depleted" if "DR-depleted" in s else "DR-specific")
    result2_df["mhc_class"] = "II"
    result2_df["sample_type"] = result2_df.kind.map(info_df.sample_type)
    result2_df["cell_line"] = [
        row.kind if row.sample_type == "B-CELL" else ""
        for _, row in result2_df.iterrows()
    ]
    del result2_df["kind"]

    result_df = pandas.concat([result1_df, result2_df], ignore_index=True)

    # DR-specific samples used HB298 antibody
    result_df.loc[
        result_df.format == "DR-specific", "pulldown_antibody"
    ] = "HB298"

    # Subsample alleles to just DR alleles for DR-specific samples.
    result_df.loc[
        result_df.format == "DR-specific", "hla"
    ] = result_df.loc[result_df.format == "DR-specific", "hla"].map(
        lambda s: " ".join([allele for allele in s.split() if "DR" in allele])
    )

    del result_df["intensity"]
    return result_df


def handle_pmid_27869121(filename):
    """Bassani-Sternberg, ..., Krackhardt Nature Comm.
2016 [PMID 27869121]""" # While this data set includes class II ligands, unfortunately the HLA # typing (Supp Table 2) seems to be class I only. So we skip this dataset. return None EXPRESSION_GROUPS_ROWS = [] def make_expression_groups(dataset_identifier, df, groups): result_df = pandas.DataFrame(index=df.index) for (label, columns) in groups.items(): for col in columns: if col not in df.columns: raise ValueError( "Missing: %s. Available: %s" % (col, df.columns.tolist())) result_df[label] = df[columns].mean(1) EXPRESSION_GROUPS_ROWS.append((dataset_identifier, label, columns)) return result_df def handle_expression_GSE113126(*filenames): """ Barry, ..., Krummel Nature Medicine 2018 [PMID 29942093] This is the melanoma met RNA-seq dataset. """ df = pandas.read_csv(filenames[0], sep="\t", index_col=0) df = df[[]] # no columns for filename in filenames: df[os.path.basename(filename)] = pandas.read_csv( filename, sep="\t", index_col=0)["TPM"] assert len(df.columns) == len(filenames) groups = { "sample_type:MELANOMA_MET": df.columns.tolist(), } return [make_expression_groups("GSE113126", df, groups)] def handle_expression_expression_atlas_22460905(filename): df = pandas.read_csv(filename, sep="\t", skiprows=4, index_col=0) del df["Gene Name"] df.columns = df.columns.str.lower() df = df.fillna(0.0) def matches(*strings): return [c for c in df.columns if all(s in c for s in strings)] groups = { "sample_type:B-LCL": ( matches("b-cell", "lymphoblast") + matches("b acute lymphoblastic")), "sample_type:B-CELL": matches("b-cell"), "sample_type:B721-LIKE": matches("b-cell"), "sample_type:MELANOMA_CELL_LINE": matches("melanoma"), "sample_type:MELANOMA": matches("melanoma"), "sample_type:KG1-LIKE": matches("myeloid leukemia"), # Using a fibrosarcoma cell line for our fibroblast sample. "sample_type:FIBROBLAST": ['fibrosarcoma, ht-1080'], # For GBM tissue we are just using a mixture of cell lines. 
        "sample_type:GLIOBLASTOMA_TISSUE": matches("glioblastoma"),

        "cell_line:A375": ['amelanotic melanoma, a-375'],
        "cell_line:THP-1": ["childhood acute monocytic leukemia, thp-1"],
        "cell_line:HL-60": ["adult acute myeloid leukemia, hl-60"],
        "cell_line:U-87": ['glioblastoma, u-87 mg'],
        "cell_line:LNT-229": ['glioblastoma, ln-229'],
        "cell_line:T98G": ['glioblastoma, t98g'],
        "cell_line:SK-MEL-5": ['cutaneous melanoma, sk-mel-5'],
        'cell_line:MEWO': ['melanoma, mewo'],
        "cell_line:HCC1937": ['breast ductal adenocarcinoma, hcc1937'],
        "cell_line:HCT116": ['colon carcinoma, hct 116'],
        "cell_line:HCC1143": ['breast ductal adenocarcinoma, hcc1143'],
    }
    return [make_expression_groups("expression_atlas_22460905", df, groups)]


def handle_expression_human_protein_atlas(*filenames):
    """Build expression groups from three Human Protein Atlas TSVs.

    Expects exactly one filename containing "celline", one containing
    "blood", and one containing "gtex"; raises ValueError (unpacking)
    otherwise. Returns a list of three group DataFrames.
    """
    (cell_line_filename,) = [f for f in filenames if "celline" in f]
    (blood_filename,) = [f for f in filenames if "blood" in f]
    (gtex_filename,) = [f for f in filenames if "gtex" in f]

    cell_line_df = pandas.read_csv(cell_line_filename, sep="\t")
    blood_df = pandas.read_csv(blood_filename, sep="\t", index_col=0)
    gtex_df = pandas.read_csv(gtex_filename, sep="\t")

    # Long -> wide: one column per cell line / tissue, TPM values.
    cell_line_df = cell_line_df.pivot(
        index="Gene", columns="Cell line", values="TPM")
    gtex_df = gtex_df.pivot(
        index="Gene", columns="Tissue", values="TPM")

    return [
        make_expression_groups(
            "human_protein_atlas:%s" % os.path.basename(blood_filename),
            blood_df,
            groups={
                "sample_type:PBMC": [
                    c for c in blood_df.columns if "total PBMC" in c
                ],
                # for samples labeled leukapheresis we also use PBMC
                "sample_type:LEUKAPHERESIS": [
                    c for c in blood_df.columns if "total PBMC" in c
                ],
                # for samples labeled TIL we are also using PBMC
                "sample_type:TIL": [
                    c for c in blood_df.columns if "total PBMC" in c
                ],
            }),
        make_expression_groups(
            "human_protein_atlas:%s" % os.path.basename(cell_line_filename),
            cell_line_df,
            groups={
                "cell_line:HELA": ['HeLa'],
                "cell_line:K562": ["K-562"],
                "cell_line:HEK293": ['HEK 293'],
                "cell_line:RPMI8226": ['RPMI-8226'],
                "cell_line:EXPI293": ['HEK 293'],  # EXPI293 derived from HEK293
            }),
        make_expression_groups(
            "human_protein_atlas:%s" % os.path.basename(gtex_filename),
            gtex_df,
            groups={
                "sample_type:LUNG": ["lung"],
                "sample_type:SPLEEN": ["spleen"],
                "sample_type:OVARY": ["ovary"],
                "sample_type:KIDNEY": ["kidney"],

                # This is bad! I just can't find anything better currently.
                # We should find some meningioma RNA-seq and switch to that.
                "sample_type:MENINGIOMA": [
                    "amygdala", "basal ganglia", "cerebellum",
                    "cerebral cortex", "midbrain", "spinal cord",
                ],
            }),
    ]


def make_expression_mixtures(expression_df):
    """Create expression groups for each mixture in CELL_LINE_MIXTURES.

    Each mixture name like "mix:a375,hela" becomes a
    "sample_type:MIX:..." group averaging the component cell-line columns.
    When a component cell line is missing from expression_df, the
    "sample_type:<NAME>-LIKE" column is used as a stand-in.

    Raises ValueError if any resolved component column is missing.
    """
    global CELL_LINE_MIXTURES
    groups = {}
    for mix in CELL_LINE_MIXTURES:
        components = []
        for item in mix.replace("mix:", "").upper().split(","):
            if "cell_line:%s" % item in expression_df.columns:
                components.append("cell_line:%s" % item)
            else:
                print("No cell line, falling back on similar: ", item)
                components.append("sample_type:%s-LIKE" % item)
        groups["sample_type:" + mix.upper()] = components
    missing = set()
    for some in groups.values():
        for item in some:
            if item not in expression_df.columns:
                missing.add(item)
    if missing:
        raise ValueError(
            "Missing [%d]: %s. Available: %s" % (
                len(missing), missing, expression_df.columns.tolist()))
    return make_expression_groups("mixtures", expression_df, groups)


# Add all functions with names like handle_pmid_XXXX to PMID_HANDLERS dict.
for (key, value) in list(locals().items()):
    if key.startswith("handle_pmid_"):
        PMID_HANDLERS[key.replace("handle_pmid_", "")] = value
    elif key.startswith("handle_expression_"):
        EXPRESSION_HANDLERS[key.replace("handle_expression_", "")] = value


def run():
    """Entry point: curate expression data and MS ligand data.

    Processes --expression-item and --ms-item arguments via the handler
    registries above, merges everything, assigns an expression dataset to
    each MS sample, and writes the expression matrix, MS table, and
    (optionally) expression-group metadata to CSV.
    """
    args = parser.parse_args(sys.argv[1:])

    # --- Expression data -------------------------------------------------
    expression_dfs = []
    for (i, item_tpl) in enumerate(args.expression_item):
        (label, filenames) = (item_tpl[0], item_tpl[1:])
        label = label.replace("-", "_")
        print(
            "Processing expression item %d of %d" % (
                i + 1, len(args.expression_item)),
            label,
            *[os.path.abspath(f) for f in filenames])

        expression_dfs_for_item = []
        handler = None
        if label in EXPRESSION_HANDLERS:
            handler = EXPRESSION_HANDLERS[label]
            expression_dfs_for_item = handler(*filenames)
        elif args.debug:
            debug(*filenames)
        else:
            raise NotImplementedError(label)

        if expression_dfs_for_item:
            print(
                "Processed expression data",
                label,
                "result dataframes",
                len(expression_dfs_for_item))
            print(*[e.columns for e in expression_dfs_for_item])
            expression_dfs.extend(expression_dfs_for_item)

    # Outer-join all expression dataframes on gene index.
    expression_df = expression_dfs[0]
    for other in expression_dfs[1:]:
        expression_df = pandas.merge(
            expression_df, other, how='outer', left_index=True,
            right_index=True)
    print("Genes in each expression dataframe: ",
        *[len(e) for e in expression_dfs])
    print("Genes in merged expression dataframe", len(expression_df))

    if CELL_LINE_MIXTURES:
        print("Generating cell line mixtures.")
        expression_mixture_df = make_expression_mixtures(expression_df)
        expression_df = pandas.merge(
            expression_df, expression_mixture_df, how='outer',
            left_index=True, right_index=True)

    # --- MS ligand data --------------------------------------------------
    ms_dfs = []
    for (i, item_tpl) in enumerate(args.ms_item):
        (pmid, filenames) = (item_tpl[0], item_tpl[1:])
        print(
            "Processing MS item %d of %d" % (i + 1, len(args.ms_item)),
            pmid,
            *[os.path.abspath(f) for f in filenames])

        ms_df = None
        handler = None
        if pmid in PMID_HANDLERS:
            handler = PMID_HANDLERS[pmid]
            ms_df = handler(*filenames)
        elif args.debug:
            debug(*filenames)
        else:
            raise NotImplementedError(pmid)

        if ms_df is not None:
            ms_df["pmid"] = pmid
            if "original_pmid" not in ms_df.columns:
                ms_df["original_pmid"] = pmid
            if "expression_dataset" not in ms_df.columns:
                ms_df["expression_dataset"] = ""
            # Normalize every cell to an upper-cased string.
            # NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1
            # (renamed to DataFrame.map) — verify the pinned pandas version.
            ms_df = ms_df.applymap(str).applymap(str.upper)
            ms_df["sample_id"] = ms_df.sample_id.str.replace(" ", "")
            print("*** PMID %s: %d peptides ***" % (pmid, len(ms_df)))
            if handler is not None:
                print(handler.__doc__)
            print("Counts by sample id:")
            print(ms_df.groupby("sample_id").peptide.nunique())
            print("")
            print("Counts by sample type:")
            print(ms_df.groupby("sample_type").peptide.nunique())
            print("****************************")

            for value in ms_df.expression_dataset.unique():
                if value and value not in expression_df.columns:
                    raise ValueError("No such expression dataset", value)

            ms_dfs.append(ms_df)
        else:
            print("Skipping MS item", pmid)

    ms_df = pandas.concat(ms_dfs, ignore_index=True, sort=False)
    ms_df["cell_line"] = ms_df["cell_line"].fillna("")
    # Collapse whitespace, then normalize each space-separated allele name.
    # NOTE(review): Series.str.replace defaults to regex=False in pandas >= 2.0;
    # this r'\s+' pattern needs regex=True there — confirm pinned version.
    ms_df["hla"] = ms_df["hla"].str.strip().str.replace(r'\s+', ' ').map(
        lambda hla: " ".join(
            [
                normalize_allele_name(a, raise_on_error=True)
                for a in hla.split()
            ]))

    for _, row in ms_df.drop_duplicates("hla").iterrows():
        alleles = row.hla.split()
        for allele in alleles:
            # Catch pairs like HLA-DQA*01:01-DQB1*01:01.
            # We want only single alleles. They get paired up in analysis code.
            if "-" in allele.replace("HLA-", ""):
                raise ValueError(
                    "Allele pair present: %s. In: %s\n%s" % (
                        allele, row.hla, row))

    # One row per sample with its descriptive attributes.
    sample_table = ms_df[
        [
            "sample_id", "pmid", "format", "expression_dataset",
            "cell_line", "sample_type",
        ]
    ].drop_duplicates().set_index("sample_id")

    # Assign an expression dataset to each sample: prefer an exact
    # cell-line column, then fall back to the sample-type column.
    sample_id_to_expression_dataset = sample_table.expression_dataset.to_dict()
    for (sample_id, value) in sorted(sample_id_to_expression_dataset.items()):
        if value:
            print("Expression dataset for sample", sample_id, "already assigned")
            continue

        cell_line_col = "cell_line:" + sample_table.loc[sample_id, "cell_line"]
        sample_type_col = "sample_type:" + (
            sample_table.loc[sample_id, "sample_type"])
        expression_dataset = None
        for col in [cell_line_col, sample_type_col]:
            if col in expression_df.columns:
                expression_dataset = col
                break

        if not expression_dataset:
            print("*" * 20)
            print("No expression dataset for sample ", sample_id)
            print("Sample info:")
            print(sample_table.loc[sample_id])
            print("*" * 20)

        sample_id_to_expression_dataset[sample_id] = expression_dataset
        print(
            "Sample", sample_id, "assigned exp. dataset", expression_dataset)

    print("Expression dataset usage:")
    print(pandas.Series(sample_id_to_expression_dataset).value_counts())

    print("PMIDs by format:")
    print(sample_table.groupby("format").pmid.unique())

    missing = [
        key for (key, value) in sample_id_to_expression_dataset.items()
        if value is None
    ]
    if missing:
        print("Missing expression data for samples", *missing)
        print(
            "Missing cell lines: ",
            *sample_table.loc[
                missing, "cell_line"].dropna().drop_duplicates().tolist())
        print("Missing sample types: ", *sample_table.loc[
            missing, "sample_type"].dropna().drop_duplicates().tolist())
        if args.debug:
            import ipdb; ipdb.set_trace()
        else:
            raise ValueError("Missing expression data for samples: ", missing)

    ms_df["expression_dataset"] = ms_df.sample_id.map(
        sample_id_to_expression_dataset)

    # Put the key columns first; keep the rest in sorted order.
    cols = [
        "pmid", "sample_id", "peptide", "format", "mhc_class", "hla",
        "expression_dataset",
    ]
    cols += [c for c in sorted(ms_df.columns) if c not in cols]
    ms_df = ms_df[cols]

    null_df = ms_df.loc[ms_df.isnull().any(1)]
    if len(null_df) > 0:
        print("Nulls:")
        print(null_df)
    else:
        print("No nulls.")

    # Each sample should be coming from only one experiment.
    assert ms_df.groupby("sample_id").pmid.nunique().max() == 1, (
        ms_df.groupby("sample_id").pmid.nunique().sort_values())

    expression_df.to_csv(args.expression_out, index=True)
    print("Wrote: %s" % os.path.abspath(args.expression_out))

    ms_df.to_csv(args.ms_out, index=False)
    print("Wrote: %s" % os.path.abspath(args.ms_out))

    if args.expression_metadata_out is not None:
        expression_metadata_df = pandas.DataFrame(
            EXPRESSION_GROUPS_ROWS,
            columns=["expression_dataset", "label", "samples"])
        expression_metadata_df["samples"] = expression_metadata_df[
            "samples"
        ].map(json.dumps)
        expression_metadata_df.to_csv(args.expression_metadata_out, index=False)
        print("Wrote: %s" % os.path.abspath(args.expression_metadata_out))


if __name__ == '__main__':
    run()
/downloads-generation/data_curated/curate_t_cell_epitopes.py
""" Curate IEDB T cell epitopes. Currently this doesn't do much except rename the peptide column from "Description" to "peptide". """ import sys import argparse import pandas from mhc2flurry.amino_acid import COMMON_AMINO_ACIDS parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( "--data-iedb", metavar="tcell_full_v3.csv", help="Path to IEDB-style T cell epitope data") parser.add_argument( "--max-epitopes", metavar="N", type=int, help="Process first N epitopes (for debugging)") parser.add_argument( "--out-csv", required=True, help="Result file") def run(): args = parser.parse_args(sys.argv[1:]) epitopes_df = pandas.read_csv( args.data_iedb, skiprows=1, nrows=args.max_epitopes) print("Read epitopes", *epitopes_df.shape) print(epitopes_df) epitopes_df.insert(0, "peptide", epitopes_df.Description) aa_regex = "^[%s]+$" % "".join(sorted(COMMON_AMINO_ACIDS)) epitopes_df = epitopes_df.loc[ epitopes_df.peptide.str.match(aa_regex) & (epitopes_df.peptide.str.len() >= 5) ] print("Epitopes with valid peptides", len(epitopes_df)) print("Generated result", *epitopes_df.shape) print(epitopes_df) epitopes_df.to_csv(args.out_csv, index=False) print("Wrote", args.out_csv) if __name__ == '__main__': run()
/downloads-generation/data_pdb/make_pdb_query.py
# Just print a JSON PDB query to stdout
# Doing this in a python script so we have comments.

import json

# Reference chain sequences to search for, one per locus/allele.
sequences = [
    # DRA1*01:01
    "MAISGVPVLGFFIIAVLMSAQESWAIKEEHVIIQAEFYLNPDQSGEFMFDFDGDEIFHVDMAKKETVWRLEEFGRF"
    "ASFEAQGALANIAVDKANLEIMTKRSNYTPITNVPPEVTVLTNSPVELREPNVLICFIDKFTPPVVNVTWLRNGKP"
    "VTTGVSETVFLPREDHLFRKFHYLPFLPSTEDVYDCRVEHWGLDEPLLKHWEFDAPSPLPETTENVVCALGLTVGL"
    "VGIIIGTIFIIKGVRKSNAAERRGPL",

    # DRB1*01:01
    "MVCLKLPGGSCMTALTVTLMVLSSPLALAGDTRPRFLWQLKFECHFFNGTERVRLLERCIYNQEESVRFDSDVGEY"
    "RAVTELGRPDAEYWNSQKDLLEQRRAAVDTYCRHNYGVGESFTVQRRVEPKVTVYPSKTQPLQHHNLLVCSVSGFY"
    "PGSIEVRWFRNGQEEKAGVVSTGLIQNGDWTFQTLVMLETVPRSGEVYTCQVEHPSVTSPLTVEWRARSESAQSKM"
    "LSGVGGFVLGLLFLGAGLFIYFRNQKGHSGLQPTGFLS",

    # DRB3*01:01
    "MVCLKLPGGSSLAALTVTLMVLSSRLAFAGDTRPRFLELRKSECHFFNGTERVRYLDRYFHNQEEFLRFDSDVGEY"
    "RAVTELGRPVAESWNSQKDLLEQKRGRVDNYCRHNYGVGESFTVQRRVHPQVTVYPAKTQPLQHHNLLVCSVSGFY"
    "PGSIEVRWFRNGQEEKAGVVSTGLIQNGDWTFQTLVMLETVPRSGEVYTCQVEHPSVTSALTVEWRARSESAQSKM"
    "LSGVGGFVLGLLFLGAGLFIYFRNQKGHSGLQPTGFLS",

    # DRB4*01:01
    "MVCLKLPGGSCMAALTVTLTVLSSPLALAGDTQPRFLEQAKCECHFLNGTERVWNLIRYI"
    "YNQEEYARYNSDLGEYQAVTELGRPDAEYWNSQKDLLERRRAEVDTYCRYNYGVVESFTV"
    "QRRVQPKVTVYPSKTQPLQHHNLLVCSVNGFYPGSIEVRWFRNSQEEKAGVVSTGLIQNG"
    "DWTFQTLVMLETVPRSGEVYTCQVEHPSMMSPLTVQWSARSESAQSKMLSGVGGFVLGLL"
    "FLGTGLFIYFRNQKGHSGLQPTGLLS",

    # DRB5*01:01
    "MVCLKLPGGSYMAKLTVTLMVLSSPLALAGDTRPRFLQQDKYECHFFNGTERVRFLHRDIYNQEEDLRFDSDVGEY"
    "RAVTELGRPDAEYWNSQKDFLEDRRAAVDTYCRHNYGVGESFTVQRRVEPKVTVYPARTQTLQHHNLLVCSVNGFY"
    "PGSIEVRWFRNSQEEKAGVVSTGLIQNGDWTFQTLVMLETVPRSGEVYTCQVEHPSVTSPLTVEWRAQSESAQSKM"
    "LSGVGGFVLGLLFLGAGLFIYFKNQKGHSGLHPTGLVS",

    # HLA-DQB1*02:01
    "MSWKKALRIPGGLRAATVTLMLSMLSTPVAEGRDSPEDFVYQFKGMCYFTNGTERVRLVS"
    "RSIYNREEIVRFDSDVGEFRAVTLLGLPAAEYWNSQKDILERKRAAVDRVCRHNYQLELR"
    "TTLQRRVEPTVTISPSRTEALNHHNLLVCSVTDFYPAQIKVRWFRNDQEETAGVVSTPLI"
    "RNGDWTFQILVMLEMTPQRGDVYTCHVEHPSLQSPITVEWRAQSESAQSKMLSGIGGFVL"
    "GLIFLGLGLIIHHRSQKGLLH",

    # HLA-DPB1*01:01
    "MMVLQVSAAPRTVALTALLMVLLTSVVQGRATPENYVYQGRQECYAFNGTQRFLERYIYN"
    "REEYARFDSDVGEFRAVTELGRPAAEYWNSQKDILEEKRAVPDRVCRHNYELDEAVTLQR"
    "RVQPKVNVSPSKKGPLQHHNLLVCHVTDFYPGSIQVRWFLNGQEETAGVVSTNLIRNGDW"
    "TFQILVMLEMTPQQGDVYICQVEHTSLDSPVTVEWKAQSDSAQSKTLTGAGGFVLGLIIC"
    "GVGIFMHRRSKKVQRGSA",
]

# Should be distinct
assert len(sequences) == len(set(sequences))


def node_from_sequence(sequence):
    """Return one RCSB sequence-search terminal node for the given sequence."""
    parameters = {
        "evalue_cutoff": 10,
        "identity_cutoff": 0.5,
        "target": "pdb_protein_sequence",
        "value": sequence,
    }
    return {
        "type": "terminal",
        "service": "sequence",
        "parameters": parameters,
    }


# OR together one terminal node per sequence and request every matching entry.
query = {
    "query": {
        "type": "group",
        "logical_operator": "or",
        "nodes": [node_from_sequence(sequence) for sequence in sequences],
    },
    "request_options": {
        "return_all_hits": True
    },
    "return_type": "entry"
}

print(json.dumps(query))
/downloads-generation/data_pdb/parse_results.py
# From a PDB results json, print out a comma separated list of PDB IDs import argparse import sys import json parser = argparse.ArgumentParser() parser.add_argument("results", metavar="JSON") parser.add_argument("out", metavar="FILE") args = parser.parse_args(sys.argv[1:]) parsed = json.load(open(args.results)) print("Loaded %d results" % len(parsed['result_set'])) print("First result") print(parsed['result_set'][0]) print("Last result") print(parsed['result_set'][-1]) with open(args.out, "w") as fd: identifiers = [entry['identifier'] for entry in parsed['result_set']] fd.write(",".join(identifiers)) fd.write("\n") print("Wrote: ", args.out)
/downloads-generation/data_proteomes/index_fasta.py
""" Write a shellinford index for a fasta. """ import argparse import time import sys import shellinford from mhc2flurry.fasta import read_fasta_to_dataframe parser = argparse.ArgumentParser(usage=__doc__) parser.add_argument( "input", metavar="FASTA", help="Input file") parser.add_argument( "output", metavar="FM", help="Output file") def run(): args = parser.parse_args(sys.argv[1:]) df = read_fasta_to_dataframe(args.input) print("Read") print(df) print("Building FM index") start = time.time() fm = shellinford.FMIndex() fm.build(df.sequence.tolist()) print("Built index of %d sequences in %0.3f sec." % ( len(df), time.time() - start)) print("Writing index") fm.write(args.output) print("Wrote", args.output) if __name__ == '__main__': run()
/mhc2flurry/__init__.py
""" Class II MHC ligand prediction package """ #from .class2_affinity_predictor import Class2AffinityPredictor #from .class2_neural_network import Class2NeuralNetwork from .version import __version__ __all__ = [ "__version__", # "Class2AffinityPredictor", # "Class2NeuralNetwork", ]
/mhc2flurry/allele_encoding_pair.py
from .allele_encoding import AlleleEncoding class AlleleEncodingPair(object): def __init__( self, alpha_allele_encoding, beta_allele_encoding): """ """ self.alpha_allele_encoding = alpha_allele_encoding self.beta_allele_encoding = beta_allele_encoding def from_pairs(self, allele_pairs): alpha_alleles = [a for (a, b) in allele_pairs] beta_alleles = [b for (a, b) in allele_pairs] return AlleleEncodingPair( AlleleEncoding( alpha_alleles, borrow_from=self.alpha_allele_encoding), AlleleEncoding( beta_alleles, borrow_from=self.beta_allele_encoding), ) @property def allele_encodings(self): return [ ("alpha", self.alpha_allele_encoding), ("beta", self.beta_allele_encoding) ] @property def allele_pairs(self): return [ (a, b) for (a, b) in zip( self.alpha_allele_encoding.alleles, self.beta_allele_encoding.alleles) ]
/mhc2flurry/downloads.py
""" Manage local downloaded data. """ from __future__ import ( print_function, division, absolute_import, ) import logging import yaml from os.path import join, exists from os import environ from pipes import quote from collections import OrderedDict from appdirs import user_data_dir from pkg_resources import resource_string import pandas ENVIRONMENT_VARIABLES = [ "MHC2FLURRY_DATA_DIR", "MHC2FLURRY_DOWNLOADS_CURRENT_RELEASE", "MHC2FLURRY_DOWNLOADS_DIR", "MHC2FLURRY_DEFAULT_MODELS_DIR", "MHC2FLURRY_DOWNLOADS_GITHUB_AUTH_TOKEN" ] _DOWNLOADS_DIR = None _CURRENT_RELEASE = None _METADATA = None _MHC2FLURRY_DEFAULT_MODELS_DIR = environ.get( "MHC2FLURRY_DEFAULT_MODELS_DIR") def get_downloads_dir(): """ Return the path to local downloaded data """ return _DOWNLOADS_DIR def get_current_release(): """ Return the current downloaded data release """ return _CURRENT_RELEASE def get_downloads_metadata(): """ Return the contents of downloads.yml as a dict """ global _METADATA if _METADATA is None: _METADATA = yaml.safe_load(resource_string(__name__, "downloads.yml")) return _METADATA def get_default_class2_models_dir(test_exists=True): """ Return the absolute path to the default class2 models dir. If environment variable MHC2FLURRY_DEFAULT_MODELS_DIR is set to an absolute path, return that path. If it's set to a relative path (i.e. does not start with /) then return that path taken to be relative to the mhc2flurry downloads dir. If environment variable _MHC2FLURRY_DEFAULT_MODELS_DIR is NOT set, then return the path to downloaded models in the "models_class2" download. 
Parameters ---------- test_exists : boolean, optional Whether to raise an exception of the path does not exist Returns ------- string : absolute path """ if _MHC2FLURRY_DEFAULT_MODELS_DIR: result = join(get_downloads_dir(), _MHC2FLURRY_DEFAULT_MODELS_DIR) if test_exists and not exists(result): raise IOError("No such directory: %s" % result) return result return get_path( "models_class2", "models", test_exists=test_exists) def get_current_release_downloads(): """ Return a dict of all available downloads in the current release. The dict keys are the names of the downloads. The values are a dict with two entries: downloaded : bool Whether the download is currently available locally metadata : dict Info about the download from downloads.yml such as URL up_to_date : bool or None Whether the download URL(s) match what was used to download the current data. This is None if it cannot be determined. """ downloads = ( get_downloads_metadata() ['releases'] [get_current_release()] ['downloads']) def up_to_date(dir, urls): try: df = pandas.read_csv(join(dir, "DOWNLOAD_INFO.csv")) return list(df.url) == list(urls) except IOError: return None return OrderedDict( (download["name"], { 'downloaded': exists(join(get_downloads_dir(), download["name"])), 'up_to_date': up_to_date( join(get_downloads_dir(), download["name"]), [download['url']] if 'url' in download else download['part_urls']), 'metadata': download, }) for download in downloads ) def get_path(download_name, filename='', test_exists=True): """ Get the local path to a file in a MHC2flurry download Parameters ----------- download_name : string filename : string Relative path within the download to the file of interest test_exists : boolean If True (default) throw an error telling the user how to download the data if the file does not exist Returns ----------- string giving local absolute path """ assert '/' not in download_name, "Invalid download: %s" % download_name path = join(get_downloads_dir(), download_name, filename) 
if test_exists and not exists(path): raise RuntimeError( "Missing MHC2flurry downloadable file: %s. " "To download this data, run:\n\tmhc2flurry-downloads fetch %s\n" "in a shell." % (quote(path), download_name)) return path def configure(): """ Setup various global variables based on environment variables. """ global _DOWNLOADS_DIR global _CURRENT_RELEASE _CURRENT_RELEASE = None _DOWNLOADS_DIR = environ.get("MHC2FLURRY_DOWNLOADS_DIR") if not _DOWNLOADS_DIR: metadata = get_downloads_metadata() _CURRENT_RELEASE = environ.get("MHC2FLURRY_DOWNLOADS_CURRENT_RELEASE") if not _CURRENT_RELEASE: _CURRENT_RELEASE = metadata['current-release'] current_release_compatability = ( metadata["releases"][_CURRENT_RELEASE]["compatibility-version"]) current_compatability = metadata["current-compatibility-version"] if current_release_compatability != current_compatability: logging.warning( "The specified downloads are not compatible with this version " "of the MHC2flurry codebase. Downloads: release %s, " "compatability version: %d. Code compatability version: %d", _CURRENT_RELEASE, current_release_compatability, current_compatability) data_dir = environ.get("MHC2FLURRY_DATA_DIR") if not data_dir: # increase the version every time we make a breaking change in # how the data is organized. For changes to e.g. just model # serialization, the downloads release numbers should be used. data_dir = user_data_dir("mhc2flurry", version="1") _DOWNLOADS_DIR = join(data_dir, _CURRENT_RELEASE) logging.debug("Configured MHC2FLURRY_DOWNLOADS_DIR: %s", _DOWNLOADS_DIR) configure()
/mhc2flurry/testing_utils.py
""" Utilities used in MHC2flurry unit tests. """ from .common import configure_tensorflow def startup(): """ Configure Keras backend for running unit tests. """ configure_tensorflow("tensorflow-cpu", num_threads=2) def cleanup(): """ Clear tensorflow session and other process-wide resources. """ import tensorflow.keras.backend as K K.clear_session()
/test/test_class2_neural_network.py
import logging
logging.getLogger('tensorflow').disabled = True
logging.getLogger('matplotlib').disabled = True

import numpy
import tensorflow.random

# Seed both RNGs before any model construction for reproducibility.
numpy.random.seed(0)
tensorflow.random.set_seed(0)

import pandas
from sklearn.metrics import roc_auc_score
import mhcgnomes

from mhc2flurry.allele_encoding_pair import AlleleEncodingPair
from mhc2flurry.allele_encoding import AlleleEncoding
from mhc2flurry.class2_neural_network import Class2NeuralNetwork
from mhc2flurry.common import random_peptides

from mhc2flurry.testing_utils import cleanup, startup

# Nose-style module-level fixtures.
teardown = cleanup
setup = startup


def make_allele_encoding_pair(allele_names, alpha_sequences, beta_sequences):
    """
    Given a list of allele names, return an AlleleEncodingPair
    """
    # mhcgnomes resolves each name into an (alpha, beta) chain pair.
    parsed_alleles = pandas.Series([
        mhcgnomes.parse(name, infer_class2_pairing=True)
        for name in allele_names
    ])
    alpha = parsed_alleles.map(lambda p: p.alpha.to_string())
    beta = parsed_alleles.map(lambda p: p.beta.to_string())
    encoding = AlleleEncodingPair(
        AlleleEncoding(alpha, allele_to_sequence=alpha_sequences),
        AlleleEncoding(beta, allele_to_sequence=beta_sequences),
    )
    return encoding


def test_simple():
    # Two alleles with disjoint synthetic binding motifs; the model should
    # learn to separate them from random 15-mers.
    # Fake pseudosequences
    alpha_sequences = {
        "HLA-DRA*01:01": "AAAN",
    }
    beta_sequences = {
        "HLA-DRB1*01:01": "AAAQ",
        "HLA-DRB1*03:01": "AAAK",
    }
    motifs = {
        "HLA-DRB1*01:01": "A.K",
        "HLA-DRB1*03:01": "Q.Q",
    }
    df = pandas.DataFrame(
        {"peptide": random_peptides(200000, length=15)}
    ).set_index("peptide")
    # Label a peptide 1 for an allele iff it contains that allele's motif.
    for (allele, motif) in motifs.items():
        df[allele] = (df.index.str.contains(motif)).astype(int)

    # Resample to have 1:1 binder / non-binder
    positive_train_df = df.loc[df.max(1) > 0.8]
    train_df = pandas.concat([
        positive_train_df,
        df.loc[~df.index.isin(positive_train_df.index)].sample(
            n=len(positive_train_df))
    ])

    model = Class2NeuralNetwork(
        minibatch_size=1024,
        random_negative_rate=1.0,
        layer_sizes=[4],
        allele_positionwise_embedding_size=4,
        patience=10,
        max_epochs=500,
        peptide_convolutions=[
            {'kernel_size': 3, 'filters': 8, 'activation': "relu"},
        ],
        peptide_encoding={
            'vector_encoding_name': 'BLOSUM62',
            'alignment_method': 'right_pad',
            'max_length': 20,
        },
    )
    train_and_check(train_df, model, alpha_sequences, beta_sequences)


def test_combination():
    # Four alleles whose motifs share sub-patterns, so the model must use
    # both peptide and allele-sequence information to separate them.
    # Fake pseudosequences
    alpha_sequences = {
        "HLA-DRA*01:01": "AAAN",
    }
    beta_sequences = {
        "HLA-DRB1*01:01": "AAAA",
        "HLA-DRB1*03:01": "CAAA",
        "HLA-DRB1*04:01": "AAAC",
        "HLA-DRB1*05:01": "CAAC",
    }
    motifs = {
        "HLA-DRB1*01:01": "K.AK",
        "HLA-DRB1*03:01": "Q.CK",
        "HLA-DRB1*04:01": "K.DQ",
        "HLA-DRB1*05:01": "Q.EQ",
    }
    df = pandas.DataFrame(
        {"peptide": random_peptides(500000, length=15)}
    ).set_index("peptide")
    for (allele, motif) in motifs.items():
        df[allele] = (df.index.str.contains(motif)).astype(int)

    # Resample to have 1:1 binder / non-binder
    positive_train_df = df.loc[df.max(1) > 0.8]
    df = pandas.concat([
        positive_train_df,
        df.loc[~df.index.isin(positive_train_df.index)].sample(
            n=int(len(positive_train_df) / df.shape[1]))
    ])

    model = Class2NeuralNetwork(
        minibatch_size=1024,
        random_negative_rate=1.0,
        layer_sizes=[4],
        allele_positionwise_embedding_size=4,
        patience=10,
        peptide_convolutions=[
            {'kernel_size': 4, 'filters': 12, 'activation': "relu"},
        ],
        max_epochs=500,
        peptide_encoding={
            'vector_encoding_name': 'BLOSUM62',
            'alignment_method': 'right_pad',
            'max_length': 15,
        },
    )
    train_df = df.sample(frac=0.8).copy()

    # Can we generalize to an unseen allele?
    # So far, haven't gotten this to work, so leaving this line commented.
    #train_df["HLA-DRB1*05:01"] = numpy.nan

    train_and_check(
        df, model, alpha_sequences, beta_sequences, train_df=train_df)


def train_and_check(df, model, alpha_sequences, beta_sequences, train_df=None):
    # Fit the model on train_df (default: a random half of df) and assert
    # AUC thresholds on both the train and held-out splits.
    print("Binders")
    print((df > 0.8).sum())
    print("Binder rate")
    print((df > 0.8).mean())

    if train_df is None:
        train_df = df.sample(frac=0.5)
    test_df = df.loc[~df.index.isin(train_df.index)]

    # Wide (peptide x allele) -> long (peptide, allele, value) format.
    stacked = train_df.stack().reset_index().dropna()
    stacked.columns = ['peptide', 'allele', 'measurement_value']

    allele_encoding = make_allele_encoding_pair(
        stacked.allele, alpha_sequences, beta_sequences)
    print(model.hyperparameters)
    model.fit(
        stacked.peptide.values,
        affinities=stacked["measurement_value"].values,
        allele_encoding_pair=allele_encoding)

    check_accuracy(
        train_df, model, alpha_sequences, beta_sequences, message="TRAIN")
    check_accuracy(
        test_df, model, alpha_sequences, beta_sequences, message="TEST")


def check_accuracy(df, network, alpha_sequences, beta_sequences, message=""):
    # Predict on every (peptide, allele) pair and assert overall and
    # per-allele AUC thresholds.
    stacked = df.stack().reset_index().dropna()
    stacked.columns = ['peptide', 'allele', 'measurement_value']
    allele_encoding = make_allele_encoding_pair(
        stacked.allele, alpha_sequences, beta_sequences)
    stacked["prediction"] = network.predict(
        stacked.peptide, allele_encoding_pair=allele_encoding)

    # Overall AUC
    stacked["binder"] = stacked.measurement_value > 0.8
    auc = roc_auc_score(stacked.binder, stacked.prediction)
    print(message, "Overall AUC", auc)
    assert auc > 0.7, message

    # Can we discern a binder for one allele from another?
    # Restrict to peptides that bind at least one allele so the per-allele
    # AUC measures allele discrimination, not binder detection.
    binder_peptides = stacked.loc[stacked.binder].peptide.unique()
    stacked_binders = stacked.loc[stacked.peptide.isin(binder_peptides)]
    allele_specific_aucs = []
    for (allele, sub_df) in stacked_binders.groupby("allele"):
        print(allele)
        print(sub_df)
        auc = roc_auc_score(sub_df.binder.values, sub_df.prediction.values)
        allele_specific_aucs.append((allele, auc))

    allele_specific_aucs = pandas.DataFrame(
        allele_specific_aucs, columns=["allele", "auc"])
    print(message, "allele specific AUCs:")
    print(allele_specific_aucs)

    print(message, "Mean predictions")
    print(stacked_binders.groupby(["allele", "binder"]).prediction.mean())

    for _, row in allele_specific_aucs.iterrows():
        assert row.auc > 0.8, (message, row.allele)
/test/test_common.py
from mhc2flurry.common import make_allele_pairs


def test_allele_pairs():
    """make_allele_pairs pairs DR beta chains with the invariant DRA chain
    and forms the cross product of DP/DQ alpha and beta chains."""
    typed_alleles = [
        "HLA-DRB1*07:01",
        "HLA-DRB1*16:01",
        "HLA-DRB4*01:03",
        "HLA-DRB5*02:02",
        "HLA-DPA1*01:03",
        "HLA-DPB1*02:01",
        "HLA-DPB1*23:01",
        "HLA-DQA1*01:02",
        "HLA-DQA1*02:01",
        "HLA-DQB1*02:02",
        "HLA-DQB1*05:02",
    ]
    expected_pairs = [
        'HLA-DRA*01:01-DRB1*07:01',
        'HLA-DRA*01:01-DRB1*16:01',
        'HLA-DRA*01:01-DRB4*01:03',
        'HLA-DRA*01:01-DRB5*02:02',
        'HLA-DPA1*01:03-DPB1*02:01',
        'HLA-DPA1*01:03-DPB1*23:01',
        'HLA-DQA1*01:02-DQB1*02:02',
        'HLA-DQA1*01:02-DQB1*05:02',
        'HLA-DQA1*02:01-DQB1*02:02',
        'HLA-DQA1*02:01-DQB1*05:02',
    ]
    assert make_allele_pairs(typed_alleles) == expected_pairs
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
strahman/django-niftyurls
refs/heads/master
{"/niftyurls/admin.py": ["/niftyurls/models.py"], "/niftyurls/management/commands/niftyurls_feeds.py": ["/niftyurls/models.py", "/niftyurls/fetch_feed.py"], "/niftyurls/templatetags/niftyurls_tags.py": ["/niftyurls/models.py"]}
└── ├── demo_project │ └── views.py ├── niftyurls │ ├── admin.py │ ├── fetch_feed.py │ ├── management │ │ └── commands │ │ └── niftyurls_feeds.py │ ├── models.py │ ├── settings.py │ └── templatetags │ └── niftyurls_tags.py └── setup.py
/demo_project/views.py
from django.http import Http404, HttpResponse
from django.views.generic.simple import direct_to_template


def home(request):
    """Render the demo landing page.

    NOTE(review): direct_to_template was removed in Django 1.5; this view
    presumably targets Django <= 1.4 — confirm against project requirements.
    """
    return direct_to_template(request, 'home.html')
/niftyurls/admin.py
from django.contrib import admin

# Python 2 implicit relative import of the sibling niftyurls/models.py.
from models import Source, Feed, Entry, MetaData


class FeedAdmin(admin.ModelAdmin):
    """Admin options for Feed; slug prepopulation is currently disabled."""
    #prepopulated_fields = {"slug": ("title",)}
    pass


class EntryAdmin(admin.ModelAdmin):
    """Admin options for Entry: list by title/feed, search title/description."""
    #date_hierarchy = 'date'
    list_display = ('title', 'feed')
    search_fields = ['title', 'description']


# Register all niftyurls models with the default admin site.
admin.site.register(Source)
admin.site.register(Feed, FeedAdmin)
admin.site.register(Entry, EntryAdmin)
admin.site.register(MetaData)
/niftyurls/fetch_feed.py
import socket
# Global socket timeout so a hung feed server cannot block a fetch forever.
socket.setdefaulttimeout(60)
import datetime, time, urllib2, urlparse #, sys, json, zlib, signal
import feedparser


class NiftyUrlsException(Exception):
    """Raised when a feed cannot be fetched or parsed."""
    pass


class FetchFeed(object):
    """Download one RSS/Atom feed via feedparser and normalize its entries."""

    def __init__(self, link, page_url=None):
        # link: the feed URL to download.
        self.link = link
        self.page_url = page_url # useful for deciding on getting the real url or not
        # NOTE(review): urlparse.urlparse(None) — page_url defaults to None;
        # presumably callers always pass a URL. Verify against callers.
        self.page_url_parsed = urlparse.urlparse(self.page_url)

    def get_item_summary(self, item):
        """Return the entry body, trying summary, then content, then description."""
        summary = ''
        if hasattr(item, "summary"):
            summary = item.summary
        elif hasattr(item, "content"):
            summary = item.content[0].value
        elif hasattr(item, "description"):
            summary = item.description
        return summary

    def get_item_pubdate(self, item):
        """Return the entry's publication datetime, falling back to now()."""
        pubdate = None
        # feedparser exposes dates under several attribute names; take the
        # first one present.
        attrs = ['updated_parsed', 'published_parsed', 'date_parsed', 'created_parsed']
        for attr in attrs:
            if hasattr(item, attr):
                pubdate = getattr(item, attr)
                break
        if pubdate:
            try:
                ts = time.mktime(pubdate)
                return datetime.datetime.fromtimestamp(ts)
            except TypeError:
                # pubdate attribute existed but was not a valid time tuple.
                pass
        return datetime.datetime.now()

    def get_final_url(self, entry):
        """Resolve redirects for off-site entry links (currently unused by
        sanitize_item; see the commented call there)."""
        entry_link_parsed = urlparse.urlparse(entry.link)
        if entry_link_parsed.hostname!=self.page_url_parsed.hostname:
            u = urllib2.urlopen(entry.link)
            entry_link = u.geturl() # get the real url
            u.close()
        else:
            entry_link = entry.link
        return entry_link

    def sanitize_item(self, entry):
        """Map a feedparser entry to a plain dict matching the Entry model fields."""
        #entry_link = self.get_final_url(entry)
        entry_link = entry.link
        return {
            'title': entry.title,
            'link': entry_link,
            'description': self.get_item_summary(entry),
            # Fall back to the link when the feed provides no GUID.
            'guid': entry.get("id", entry.link),
            'pubdate': self.get_item_pubdate(entry),
        }

    def fetch_feed(self):
        """Download and parse the feed into self.data; raise on parse errors."""
        self.data = feedparser.parse(self.link)
        # feedparser sets bozo when the feed was malformed or unreachable.
        if 'bozo' in self.data and self.data.bozo:
            raise NiftyUrlsException('Error fetching %s' % self.link)
        return True

    # Dead alternative implementation kept as a string for reference.
    """
    def fetch_feed(self):
        def timeout(signum, frame):
            raise NiftyUrlsException('Timeout fetching %s' % self.link)
        signal.signal(signal.SIGALRM, timeout)
        signal.alarm(65) # timeout in X seconds, in case that socket timeout isn't working
        try:
            self.data = feedparser.parse(self.link)
            if 'bozo' in self.data and self.data.bozo:
                raise NiftyUrlsException('Error fetching %s' % self.link)
        except Exception as e:
            signal.alarm(0)
            raise(e)
        return True
    """
/niftyurls/management/commands/niftyurls_feeds.py
# -*- coding: UTF-8 -*-
"""Management command that downloads all published feeds and stores entries.

Intended to be run from cron; a SIGALRM watchdog aborts the whole run after
30 minutes.
"""
import signal, datetime, logging, os, sys, time, json, base64 #zlib, bz2,
from optparse import make_option

from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType

from niftyurls.fetch_feed import FetchFeed
from niftyurls.models import Feed, Entry, MetaData
from niftyurls.settings import NIFTYURLS_LIMIT_POSTS


def alarm_handler(signum, frame):
    # Watchdog: exit cleanly instead of letting a stuck fetch run forever.
    print 'Signal handler called with signal', signum
    signal.alarm(0)
    sys.exit(0)

signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(60*30) # run for maximul 30 minutes

#connection.connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")


class Command(BaseCommand):
    help = "Can be run as a cronjob or directly to download RSS feeds."
    # optparse-style options (pre-Django-1.8 command API).
    option_list = BaseCommand.option_list + (
        make_option(
            '--verbose',
            action='store_true',
            dest='verbose',
            default=True,
            help='Log output to console.'
        ),
        make_option(
            '--limit',
            action='store',
            dest='limit',
            default=300,
            help='Feed limit'
        ),
    )

    def handle(self, **options):
        """ Update the database with articles """
        # delete extra posts
        # Keep only the newest NIFTYURLS_LIMIT_POSTS entries per feed.
        for feed in Feed.objects.values('id'):
            excluded_entries = Entry.objects.filter(feed=feed['id']).order_by('-created', '-id')[:NIFTYURLS_LIMIT_POSTS]
            Entry.objects.filter(feed=feed['id']).exclude(pk__in=excluded_entries).delete()

        verbose = options.get('verbose', True)
        logging.basicConfig(
            filename='news_log.log',
            level=logging.INFO,
            format='%(asctime)s %(levelname)-8s %(message)s',
        )
        if verbose:
            # Mirror the file log to the console.
            console = logging.StreamHandler()
            console.setLevel(logging.INFO)
            formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
            console.setFormatter(formatter)
            logging.getLogger('').addHandler(console)
        logging.info('Download starting')
        #total_start = time.time()
        #new_articles = 0
        entry_type = ContentType.objects.get(app_label="niftyurls", model="entry")
        # Serialize datetimes as ISO strings when JSON-encoding entry metadata.
        dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.datetime) else None
        from django.db import connection
        connection.connection.text_factory = str
        # Oldest-downloaded feeds first, capped by --limit.
        for feed in Feed.objects.filter(is_published=True).order_by("last_downloaded")[0:options.get('limit')]: #.filter(slug='hackernews')
            logging.info("Processing feed: %s" % (feed.title))
            start = time.time()
            logging.info("Downloading: %s" % feed.link)
            try:
                fetch = FetchFeed(feed.link, feed.page_url)
                fetch.fetch_feed()
            except: #NiftyUrlsException
                # NOTE(review): on failure `fetch` may be stale from the
                # previous iteration (or unbound on the first one), yet the
                # loop below still reads fetch.data — confirm intended.
                logging.error("Error occurred processing %s" % feed.link)
            for entry in fetch.data.entries:
                try:
                    entry = fetch.sanitize_item(entry)
                except:
                    # Skip entries missing required attributes.
                    continue
                entry_obj, created = Entry.objects.get_or_create(guid=entry['guid'], feed=feed, defaults=entry)
                try:
                    #zlib.compress #base64.standard_b64encode
                    metadata_value = json.dumps(entry.items(), default=dthandler)
                    #MetaData.objects.get_or_create(content_type=entry_type, object_id=entry_obj.pk, key='entry', defaults={'value':metadata_value})
                except Exception as ex:
                    #raise ex
                    pass
            feed.last_downloaded = datetime.datetime.now()
            feed.save()
            end = time.time()
            logging.info("This feed processing took %fs" % (end - start))
            time.sleep(2) # wait a bit, don't lock the database
        #total_end = time.time()
/niftyurls/models.py
"""niftyurls data model: Source -> Feed -> Entry, plus generic MetaData."""
import datetime

from django.utils.translation import ugettext as _
from django.db import models
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify


class BasicModel(models.Model):
    """Abstract base providing publish/feature flags and timestamps."""
    is_published = models.BooleanField(default=True, help_text=_('This object is enabled.'))
    is_featured = models.BooleanField(default=False, help_text=_('This object is special.'))
    created = models.DateTimeField(auto_now_add=True, db_index=True)
    modified = models.DateTimeField(auto_now=True, db_index=True)

    class Meta(object):
        abstract = True


class Source(BasicModel):
    """
    A source is a general news source, like CNN, who may provide
    multiple feeds.
    """
    title = models.CharField(max_length=255)
    #slug = models.SlugField(max_length=255, editable=False)
    link = models.URLField()
    description = models.TextField(blank=True)
    logo = models.ImageField(blank=True, upload_to='images/logos')

    class Meta(object):
        ordering = ('title',)

    def save(self, *args, **kwargs):
        # NOTE: the slug field above is commented out, so this assignment is
        # not persisted; kept so re-enabling the field works unchanged.
        self.slug = slugify(self.title)
        # BUGFIX: was super(Feed, self).save(...), which raises TypeError
        # because a Source instance is not a Feed. Delegate to Source's MRO.
        super(Source, self).save(*args, **kwargs)

    def __unicode__(self):
        return u'%s' % self.title


class Feed(BasicModel):
    """
    A feed is the actual RSS/Atom feed that will be downloaded. It has a
    many-to-many relationship to categories through the
    FeedCategoryRelationship model, which allows white-lists to be applied
    to the feed before articles will be added to the category.
    """
    title = models.CharField(max_length=255)
    link = models.URLField(unique=True)
    page_url = models.URLField(blank=True, null=True)
    description = models.TextField()
    pubdate = models.DateTimeField(blank=True, null=True)
    source = models.ForeignKey(Source, blank=True, null=True)
    sites = models.ManyToManyField(Site)
    # Updated on every save; used to order feeds for downloading.
    last_downloaded = models.DateTimeField(auto_now=True)

    class Meta(object):
        ordering = ('title',)

    def __unicode__(self):
        # NOTE(review): crashes if source is NULL (the field allows it).
        return u'%s - %s' % (self.source.title, self.title)


class Entry(BasicModel):
    """A single article downloaded from a Feed."""
    guid = models.CharField(max_length=255, blank=True, editable=False, db_index=True)
    title = models.CharField(max_length=255)
    link = models.URLField()
    description = models.TextField()
    pubdate = models.DateTimeField(blank=True, null=True)
    feed = models.ForeignKey(Feed) #, related_name='entries'

    class Meta(object):
        ordering = ('-is_published', '-pubdate', 'title')

    def __unicode__(self):
        return u'%s' % self.title

    def get_absolute_url(self):
        # Entries link out to the original article, not a local page.
        return self.link


class MetaData(models.Model):
    """Arbitrary key/value data attached to any model via contenttypes."""
    key = models.CharField(max_length=50)
    value = models.TextField()
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey('content_type', 'object_id')

    def __unicode__(self):
        return u'key:%s, object_id:%d' % (self.key, self.object_id)
/niftyurls/settings.py
"""App-level settings with overridable defaults pulled from Django settings."""
import os

from django.conf import settings

# Per-domain display settings; the 'example.com' entry acts as the fallback.
temp_settings = {'example.com': {'rows': '3', 'title': 'Welcome', 'h1': 'Hello'}}
temp_settings.update(getattr(settings, 'NIFTYURLS_SETTINGS', {}))

# Base URL for the app's static assets (trailing slash enforced).
NIFTYURLS_MEDIA = getattr(settings, 'NIFTYURLS_MEDIA', os.path.join(settings.MEDIA_URL, 'niftyurls')+"/")
NIFTYURLS_SETTINGS = temp_settings
NIFTYURLS_LIMIT_POSTS = getattr(settings, 'NIFTYURLS_LIMIT_POSTS', 40) # keep maximum 40 articles in db

# Default script/style includes; projects can override via NIFTYURLS_JS/CSS.
default_js = ['http://ajax.googleapis.com/ajax/libs/jquery/1.5/jquery.min.js',
    'http://cufon.shoqolate.com/js/cufon-yui.js',
    NIFTYURLS_MEDIA+'niftyurls/fonts/Museo.font.js',
    NIFTYURLS_MEDIA+'niftyurls/facebox/facebox.js',
    NIFTYURLS_MEDIA+'niftyurls/js/niftyurls.js',
]

default_css = ['http://yui.yahooapis.com/combo?3.3.0/build/cssreset/reset-min.css&3.3.0/build/cssfonts/fonts-min.css&3.3.0/build/cssgrids/grids-min.css&3.3.0/build/cssbase/base-min.css',
    NIFTYURLS_MEDIA+'niftyurls/css/style.css',
    NIFTYURLS_MEDIA+'niftyurls/facebox/facebox.css',
]

NIFTYURLS_JS = getattr(settings, 'NIFTYURLS_JS', default_js)
NIFTYURLS_CSS = getattr(settings, 'NIFTYURLS_CSS', default_css)
/niftyurls/templatetags/niftyurls_tags.py
"""Template tags/filters rendering niftyurls feed grids and media includes."""
import re, sys

from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django import template

from niftyurls.models import Feed
from niftyurls.settings import NIFTYURLS_SETTINGS, NIFTYURLS_JS, \
    NIFTYURLS_CSS, NIFTYURLS_MEDIA

register = template.Library()


def replace( string, args ):
    """Filter: sed-like replace. First char of `args` is the delimiter,
    e.g. "|foo|bar" substitutes regex foo with bar."""
    search = args.split(args[0])[1]
    replace = args.split(args[0])[2]
    return re.sub( search, replace, string )
register.filter('replace', replace)


def get_site(context):
    """Return the Site matching the request's domain (sans www.),
    falling back to the 'example.com' Site record."""
    domain = RequestSite(context['request']).domain.replace('www.', '')
    try:
        current_site = Site.objects.get(domain=domain)
    except Site.DoesNotExist:
        current_site = Site.objects.get(domain='example.com')
    return current_site


def get_settings(context):
    """Return per-domain settings (defaults merged with domain overrides),
    memoized in the template render_context keyed by domain."""
    domain = RequestSite(context['request']).domain.replace('www.', '')
    if not context.render_context.get(domain):
        settings = dict(NIFTYURLS_SETTINGS).get('example.com', {})
        settings.update(dict(NIFTYURLS_SETTINGS).get(domain, {}))
        context.render_context[domain] = settings
    return context.render_context.get(domain)


def niftyurls_content(context):
    """Inclusion tag: lay out the current site's feeds into rows/columns
    according to the 'rows' setting (e.g. "3,2")."""
    settings = get_settings(context)
    current_site = get_site(context)
    feeds = Feed.objects.filter(sites=current_site)
    """ filter feeds with less than N entries """
    feeds = filter(lambda feed: feed.entry_set.count()>4, feeds)
    # NOTE(review): the default 3 is an int, and int has no split(); this
    # presumably relies on 'rows' always being configured as a string.
    rows_settings = settings.get('rows', 3)
    feeds_per_row = []
    start = 0
    """ 3,2 means 3 rows, 3 columns on the first row, 2 columns on the second row """
    columns = rows_settings.split(',')
    while len(columns) < len(feeds):
        columns.append(columns[-1]) # repeat the last number of columns
    for col in columns:
        end = start+int(col)
        feeds_per_row.append(feeds[start:end])
        start = end
    extra_context = {
        'rows':feeds_per_row,
        'current_site': current_site,
        'NIFTYURLS_MEDIA': NIFTYURLS_MEDIA,
    }
    context.update(extra_context)
    return context


def niftyurls_media(args):
    """Inclusion tag: expose JS and/or CSS include lists based on `args`
    containing 'js' and/or 'css'."""
    context = {}
    if 'js' in args:
        context.update(dict(js_files=list(NIFTYURLS_JS)))
    if 'css' in args:
        context.update(dict(css_files=list(NIFTYURLS_CSS)))
    return context


def niftyurls_settings(parser, token):
    """Tag compiler: {% niftyurls_settings KEY %} renders the per-domain
    setting named KEY."""
    try:
        tag_name, arg = token.split_contents()
    except ValueError:
        # Python 2 raise syntax; keep while the codebase targets Python 2.
        raise template.TemplateSyntaxError, "%r tag requires a single argument" % token.contents.split()[0]
    return NiftySettings(arg)


class NiftySettings(template.Node):
    """Node resolving a single settings key at render time."""
    def __init__(self, arg):
        self.arg = arg

    def render(self, context):
        settings = get_settings(context)
        return settings.get(self.arg, None)


register.inclusion_tag('niftyurls/content.html', takes_context = True)(niftyurls_content)
register.inclusion_tag('niftyurls/media.html')(niftyurls_media)
register.tag('niftyurls_settings', niftyurls_settings)
/setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BUGFIX: find_packages does not exist in distutils.core (it is a setuptools
# function), and the original setup() call repeated the `packages` keyword,
# which is a SyntaxError ("keyword argument repeated"). The find_packages
# variant is kept; exclude is now a proper tuple of patterns instead of the
# bare string ('demo_project') the original passed.
from setuptools import setup, find_packages

setup(
    name='django-niftyurls',
    version='1.0.0-alpha',
    description='A popurls style Django application.',
    author='Florentin Sardan',
    author_email='florentinwww@gmail.com',
    long_description=open('README.md', 'r').read(),
    url='http://www.betterprogramming.com/',
    packages=find_packages(exclude=('demo_project',)),
    install_requires=[
        'Django>=1.2.1',
        'PIL',
        'feedparser',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Utilities'
    ],
)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
AklerQ/python_training
refs/heads/master
{"/data/contact_data.py": ["/model/contact.py"], "/fixture/contact.py": ["/model/contact.py"], "/generator/contact_gen.py": ["/model/contact.py"], "/test/test_add_contact_to_group.py": ["/model/contact.py"], "/test/test_contact_data_validation.py": ["/model/contact.py"], "/test/test_del_contact.py": ["/model/contact.py"], "/test/test_del_contact_from_group.py": ["/model/contact.py"], "/test/test_edit_contact.py": ["/model/contact.py"]}
└── ├── data │ └── contact_data.py ├── fixture │ ├── contact.py │ ├── group.py │ └── navigation.py ├── generator │ └── contact_gen.py ├── model │ └── contact.py └── test ├── test_add_contact_to_group.py ├── test_contact_data_validation.py ├── test_db_matches_ui.py ├── test_del_contact.py ├── test_del_contact_from_group.py ├── test_edit_contact.py └── test_edit_group.py
/data/contact_data.py
"""Randomized Contact test data: one empty contact plus five random ones."""
from model.contact import Contact
import random
import string


def random_string(prefix, maxlen):
    """Return `prefix` plus 0..maxlen-1 random letters/digits/spaces
    (spaces over-weighted tenfold to mimic real text)."""
    symbols = string.ascii_letters + string.digits + " "*10
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


def random_number(maxlen):
    """Return a phone-like string of 0..maxlen-1 digits/parens/dash/space."""
    symbols = string.digits + ")" + "(" + "-" + " "
    return "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


def random_email(maxlen):
    """Return a random local@domain.ru address; each part 0..maxlen-1 chars."""
    symbols = string.ascii_lowercase + string.digits + "_" + "-"
    return "".join([random.choice(symbols) for i in range(random.randrange(maxlen))] + ['@'] +
                   [random.choice(symbols) for i in range(random.randrange(maxlen))] + ['.', 'ru'])


def random_date(maxlen):
    """Return a random integer in [0, maxlen) as a string, used as an
    option index inside the date-select XPaths below."""
    return str(random.randrange(maxlen))


# First element: an all-empty contact (date fields point at option[1], the
# blank "-" choice); followed by five fully randomized contacts. The
# birth/anniversary fields are XPath locators consumed by
# fixture/contact.py, not plain values.
testdata = [Contact(firstname="", middlename="", lastname="", nickname="", companyname="", address="",
                    homenumber="", worknumber="", email="", email2="",
                    birth_date="//div[@id='content']/form/select[1]//option[1]",
                    birth_month="//div[@id='content']/form/select[2]//option[1]", birth_year="",
                    anniversary_date="//div[@id='content']/form/select[3]//option[1]",
                    anniversary_month="//div[@id='content']/form/select[4]//option[1]", notes="",
                    mobilenumber="", secondarynumber="")] + [
    Contact(firstname=random_string("firstname", 10), middlename=random_string("middlename", 10),
            lastname=random_string("lastname", 10), nickname=random_string("nickname", 10),
            companyname=random_string("companyname", 10), address=random_string("address", 25),
            homenumber=random_number(9), mobilenumber=random_number(12),
            worknumber=random_number(12), email=random_email(6), email2=random_email(7),
            email3=random_email(8),
            birth_date="//div[@id='content']/form/select[1]//option["+random_date(32)+"]",
            birth_month="//div[@id='content']/form/select[2]//option["+random_date(13)+"]",
            birth_year=random_number(4),
            anniversary_date="//div[@id='content']/form/select[3]//option["+random_date(32)+"]",
            notes=random_string("name", 30),
            anniversary_month="//div[@id='content']/form/select[4]//option["+random_date(13)+"]",
            secondarynumber=random_number(12))
    for i in range(5)]
/fixture/contact.py
# -*- coding: utf-8 -*-
from model.contact import Contact
import re


class ContactHelper:
    """Selenium helper for creating, editing, deleting and reading contacts
    in the addressbook UI. `app` is the application fixture exposing the
    webdriver (`app.wd`) and navigation helper."""

    def __init__(self, app):
        self.app = app

    def create(self, contact):
        """Create a new contact from a Contact model and return to home."""
        wd = self.app.wd
        self.app.navigation.turn_to_home_page()
        # create new contact
        wd.find_element_by_link_text("add new").click()
        # fill contact form
        self.fill_contact_fields(contact)
        # submit created contact
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.app.navigation.return_to_home_page()
        self.contact_cache = None

    def fill_contact_fields(self, contact):
        """Populate the contact form from a Contact; date fields are XPath
        locators of <option> elements, not plain values."""
        wd = self.app.wd
        # fill personal data
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("middlename", contact.middlename)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("company", contact.companyname)
        self.change_field_value("address", contact.address)
        # fill communication data
        self.change_field_value("home", contact.homenumber)
        self.change_field_value("mobile", contact.mobilenumber)
        self.change_field_value("work", contact.worknumber)
        self.change_field_value("email", contact.email)
        self.change_field_value("email2", contact.email2)
        self.change_field_value("phone2", contact.secondarynumber)
        # fill dates
        if not wd.find_element_by_xpath(contact.birth_date).is_selected():
            wd.find_element_by_xpath(contact.birth_date).click()
        if not wd.find_element_by_xpath(contact.birth_month).is_selected():
            wd.find_element_by_xpath(contact.birth_month).click()
        self.change_field_value("byear", contact.birth_year)
        if not wd.find_element_by_xpath(contact.anniversary_date).is_selected():
            wd.find_element_by_xpath(contact.anniversary_date).click()
        if not wd.find_element_by_xpath(contact.anniversary_month).is_selected():
            wd.find_element_by_xpath(contact.anniversary_month).click()
        # fill contact commentary
        self.change_field_value("notes", contact.notes)
        if not wd.find_element_by_xpath(contact.new_group).is_selected():
            wd.find_element_by_xpath(contact.new_group).click()

    def change_field_value(self, field_name, text):
        """Clear and retype a form field; a None text leaves it untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def select_contact_by_index(self, index):
        # Tick the row checkbox at the given position in the list.
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def select_first_contact(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def delete_contact_by_index(self, index):
        """Delete the contact at `index`, confirming the browser alert."""
        wd = self.app.wd
        self.app.navigation.turn_to_home_page()
        self.select_contact_by_index(index)
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        wd.switch_to_alert().accept()
        # TURN is reused here instead of RETURN because after deletion the
        # home_page link is not available.
        self.app.navigation.turn_to_home_page()
        self.contact_cache = None

    def delete_first_contact(self):
        self.delete_contact_by_index(0)

    def edit_contact_by_index(self, index, contact):
        """Open the edit form for row `index`, apply `contact`, and save."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        self.fill_contact_fields(contact)
        wd.find_element_by_xpath("//input[@name='update'][@value='Update']").click()
        self.app.navigation.return_to_home_page()
        self.contact_cache = None

    def edit_contact_by_id(self, id, contact):
        """Open the edit form by database id, apply `contact`, and save."""
        wd = self.app.wd
        self.app.navigation.open_contact_edit_page_by_id(id)
        self.fill_contact_fields(contact)
        wd.find_element_by_xpath("//input[@name='update'][@value='Update']").click()
        self.app.navigation.return_to_home_page()
        self.contact_cache = None

    def edit_first_contact(self, contact):
        self.edit_contact_by_index(0, contact)

    def count_contacts(self):
        """Return the number of contact rows on the home page."""
        wd = self.app.wd
        self.app.navigation.turn_to_home_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached list of contacts scraped from the home page; invalidated by
    # every mutating operation above.
    contact_cache = None

    def get_contact_list(self):
        """Scrape (and cache) all contacts visible on the home page."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.app.navigation.turn_to_home_page()
            self.contact_cache = []
            for row in wd.find_elements_by_css_selector('tr[name=entry]'):
                cells = row.find_elements_by_css_selector('td')
                id = cells[0].find_element_by_css_selector('input').get_attribute('value')
                lastname = cells[1].text
                firstname = cells[2].text
                address = cells[3].text
                all_email = cells[4].text
                all_phones = cells[5].text
                self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,
                                                  address=address,
                                                  all_phones_from_home_page=all_phones,
                                                  all_email_from_home_page=all_email))
        return list(self.contact_cache)

    def open_contact_view_by_index(self, index):
        # Column 7 of a row holds the "view details" link.
        wd = self.app.wd
        self.app.navigation.turn_to_home_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()

    def open_contact_to_edit_by_index(self, index):
        # +2 skips the header row and converts 0-based index to 1-based XPath.
        wd = self.app.wd
        self.app.navigation.turn_to_home_page()
        wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr["+str(index+2)+"]/td[8]/a/img").click()

    def get_contact_info_from_edit_page(self, index):
        """Read all field values from the edit form into a Contact."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name('firstname').get_attribute('value')
        lastname = wd.find_element_by_name('lastname').get_attribute('value')
        id = wd.find_element_by_name('id').get_attribute('value')
        homenumber = wd.find_element_by_name('home').get_attribute('value')
        mobilenumber = wd.find_element_by_name('mobile').get_attribute('value')
        worknumber = wd.find_element_by_name('work').get_attribute('value')
        secondarynumber = wd.find_element_by_name('phone2').get_attribute('value')
        address = wd.find_element_by_name('address').get_attribute('value')
        email = wd.find_element_by_name('email').get_attribute('value')
        email2 = wd.find_element_by_name('email2').get_attribute('value')
        email3 = wd.find_element_by_name('email3').get_attribute('value')
        return Contact(id=id, firstname=firstname, lastname=lastname, homenumber=homenumber,
                       mobilenumber=mobilenumber, worknumber=worknumber,
                       secondarynumber=secondarynumber, address=address,
                       email=email, email2=email2, email3=email3)

    def get_contact_from_view_page(self, index):
        """Parse the phone numbers out of the read-only contact view page.
        The page renders them as "H: ...", "W: ...", "M: ...", "P: ..."."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        homenumber = re.search("H: (.*)", text)
        if homenumber is not None:
            homenumber = homenumber.group(1)
        worknumber = re.search("W: (.*)", text)
        if worknumber is not None:
            worknumber = worknumber.group(1)
        mobilenumber = re.search("M: (.*)", text)
        if mobilenumber is not None:
            mobilenumber = mobilenumber.group(1)
        secondarynumber = re.search("P: (.*)", text)
        if secondarynumber is not None:
            secondarynumber = secondarynumber.group(1)
        return Contact(homenumber=homenumber, worknumber=worknumber,
                       mobilenumber=mobilenumber, secondarynumber=secondarynumber)

    def delete_contact_by_id(self, id):
        """Delete the contact with database id `id`, confirming the alert."""
        wd = self.app.wd
        self.app.navigation.turn_to_home_page()
        self.select_contact_by_id(id)
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        wd.switch_to_alert().accept()
        # TURN is reused here instead of RETURN because after deletion the
        # home_page link is not available.
        self.app.navigation.turn_to_home_page()
        self.contact_cache = None

    def select_contact_by_id(self, id):
        # Row checkboxes carry the contact id as their element id.
        wd = self.app.wd
        wd.find_element_by_id(id).click()

    def clean(self, contact):
        """Return a copy with whitespace-stripped names, for DB/UI comparison."""
        return Contact(id=contact.id, firstname=contact.firstname.strip(),
                       lastname=contact.lastname.strip())

    def add_contact_to_group(self):
        # Assumes a contact and a target group are already selected on the page.
        wd = self.app.wd
        wd.find_element_by_name("add").click()
        self.contact_cache = None

    def delete_contact_from_group(self):
        # Assumes the group-filtered view is open with a contact selected.
        wd = self.app.wd
        wd.find_element_by_xpath('//input[@name="remove"]').click()
        self.contact_cache = None
/fixture/group.py
# -*- coding: utf-8 -*-
from model.group import Group


class GroupHelper:
    """Selenium helper for managing contact groups via the groups page."""

    def __init__(self, app):
        self.app = app

    def create(self, group):
        """Create a new group from a Group model."""
        wd = self.app.wd
        self.app.navigation.open_groups_page()
        # init group creation
        wd.find_element_by_name("new").click()
        # fill group form
        self.fill_group_fields(group)
        # submit group creation
        wd.find_element_by_name("submit").click()
        self.app.navigation.return_to_groups_page()
        self.group_cache = None

    def delete_group_by_index(self, index):
        """Delete the group at list position `index`."""
        wd = self.app.wd
        self.app.navigation.open_groups_page()
        self.select_group_by_index(index)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.app.navigation.return_to_groups_page()
        self.group_cache = None

    def delete_first_group(self):
        self.delete_group_by_index(0)

    def edit_group_by_index(self, index, input_group):
        """Open the group at `index` for editing and apply `input_group`."""
        wd = self.app.wd
        self.app.navigation.open_groups_page()
        self.select_group_by_index(index)
        # init group edition
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_fields(input_group)
        # submit group edition
        wd.find_element_by_name("update").click()
        self.app.navigation.return_to_groups_page()
        self.group_cache = None

    def edit_first_group(self, input_group):
        self.edit_group_by_index(0, input_group)

    def select_first_group(self):
        wd = self.app.wd
        wd.find_element_by_name("selected[]").click()

    def select_group_by_index(self, index):
        # Tick the checkbox of the group at the given list position.
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def fill_group_fields(self, input_group):
        """Populate the three group form fields from a Group model."""
        self.change_field_value("group_name", input_group.name)
        self.change_field_value("group_header", input_group.header)
        self.change_field_value("group_footer", input_group.footer)

    def change_field_value(self, field_name, text):
        """Clear and retype a form field; a None text leaves it untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def count_groups(self):
        """Return the number of groups listed on the groups page."""
        wd = self.app.wd
        self.app.navigation.open_groups_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached group list scraped from the groups page; invalidated by every
    # mutating operation above.
    group_cache = None

    def get_group_list(self):
        """Scrape (and cache) all groups from the groups page."""
        if self.group_cache is None:
            wd = self.app.wd
            self.app.navigation.open_groups_page()
            self.group_cache = []
            for element in wd.find_elements_by_css_selector("span.group"):
                text = element.text
                id = element.find_element_by_name("selected[]").get_attribute("value")
                self.group_cache.append(Group(name=text, id=id))
        return list(self.group_cache)

    def delete_group_by_id(self, id):
        """Delete the group with database id `id`."""
        wd = self.app.wd
        self.app.navigation.open_groups_page()
        self.select_group_by_id(id)
        # submit deletion
        wd.find_element_by_name("delete").click()
        self.app.navigation.return_to_groups_page()
        self.group_cache = None

    def select_group_by_id(self, id):
        # Checkboxes carry the group id in their value attribute.
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def select_group_by_id_for_add_to(self, id):
        # Choose the target group in the "add to group" dropdown.
        wd = self.app.wd
        wd.find_element_by_xpath('//select[@name="to_group"]/option[@value="%s"]' % id).click()

    def clean(self, group):
        """Return a copy with a whitespace-stripped name, for comparisons."""
        return Group(id=group.id, name=group.name.strip())

    def edit_group_by_id(self, id, input_group):
        """Open the group with database id `id` for editing and apply
        `input_group`."""
        wd = self.app.wd
        self.app.navigation.open_groups_page()
        self.select_group_by_id(id)
        # init group edition
        wd.find_element_by_name("edit").click()
        # fill group form
        self.fill_group_fields(input_group)
        # submit group edition
        wd.find_element_by_name("update").click()
        self.app.navigation.return_to_groups_page()
        self.group_cache = None
/fixture/navigation.py
# -*- coding: utf-8 -*- class NavigationHelper: def __init__(self, app): self.app = app def open_home_page(self): wd = self.app.wd if not ((len(wd.find_elements_by_link_text("Create account")) > 0) and (len(wd.find_elements_by_link_text("Forgot password")) > 0)): wd.get(self.app.base_url) def turn_to_home_page(self): wd = self.app.wd if not (len(wd.find_elements_by_name("add")) > 0 and wd.find_element_by_xpath("//*[contains(text(), 'Number of results')]")): wd.find_element_by_link_text("home").click() def return_to_home_page(self): wd = self.app.wd if not (len(wd.find_elements_by_name("add")) > 0 and wd.find_element_by_xpath("//*[contains(text(), 'Number of results')]")): wd.find_element_by_link_text("home page").click() def open_groups_page(self): wd = self.app.wd if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0): wd.find_element_by_link_text("groups").click() def return_to_groups_page(self): wd = self.app.wd if not (wd.current_url.endswith("/group.php") and len(wd.find_elements_by_name("new")) > 0): wd.find_element_by_link_text("group page").click() def open_contact_edit_page_by_id(self, id): wd = self.app.wd if not wd.current_url.endswith("/edit.php?id=%s" % id): wd.get(self.app.base_url+"/edit.php?id=%s" % id) def open_group_page_by_id(self, id): wd = self.app.wd if not wd.current_url.endswith("/?group=%s" % id): wd.get(self.app.base_url+"?group=%s" % id)
/generator/contact_gen.py
from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys

# Command-line options:
#   -n / --number : how many random contacts to generate (default 5)
#   -f / --file   : output file, relative to the project root (default data/contacts.json)
try:
    # BUG FIX: the long-option names used to contain spaces
    # ("number of contacts", "file" was fine), which getopt can never match;
    # proper long options take the form "name=" when they require a value.
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    # BUG FIX: the original called getopt.usage(), which does not exist and
    # raised AttributeError instead of reporting the real problem.
    sys.stderr.write("%s\nusage: contact_gen.py [-n number_of_contacts] [-f output_file]\n" % err)
    sys.exit(2)

n = 5  # number of random contacts to generate
f = "data/contacts.json"  # output file path, relative to the project root

for o, a in opts:
    if o in ("-n", "--number"):
        n = int(a)
    elif o in ("-f", "--file"):
        f = a


def random_string(prefix, maxlen):
    """Return prefix plus a random tail (length < maxlen, possibly empty) of
    letters, digits and spaces; spaces are over-weighted to look name-like."""
    symbols = string.ascii_letters + string.digits + " " * 10
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


def random_number(maxlen):
    """Return a random phone-like string (digits, parens, dashes, spaces)."""
    symbols = string.digits + ")" + "(" + "-" + " "
    return "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


def random_email(maxlen):
    """Return a random e-mail-like string ending in '.ru'.

    NOTE(review): either the local or the domain part may come out empty
    because randrange(maxlen) can return 0 — presumably intentional for
    messy test data; confirm before tightening.
    """
    symbols = string.ascii_lowercase + string.digits + "_" + "-"
    return "".join([random.choice(symbols) for i in range(random.randrange(maxlen))]
                   + ['@']
                   + [random.choice(symbols) for i in range(random.randrange(maxlen))]
                   + ['.', 'ru'])


def random_date(maxlen):
    """Return a random non-negative integer below maxlen, as a string.

    Used as a 1-based XPath option index; NOTE(review): can return "0",
    which selects no option — confirm whether that is acceptable.
    """
    return str(random.randrange(maxlen))


# First entry is the all-empty contact (edge case), followed by n random ones.
testdata = [Contact(firstname="", middlename="", lastname="", nickname="", companyname="",
                    address="", homenumber="", worknumber="", email="", email2="", mobilenumber="",
                    birth_date="//div[@id='content']/form/select[1]//option[1]",
                    birth_month="//div[@id='content']/form/select[2]//option[1]", birth_year="",
                    anniversary_date="//div[@id='content']/form/select[3]//option[1]",
                    anniversary_month="//div[@id='content']/form/select[4]//option[1]",
                    notes="", secondarynumber="",
                    new_group="//select[@name='new_group']/option[@value='[none]']")] + [
    Contact(firstname=random_string("firstname", 10), middlename=random_string("middlename", 10),
            lastname=random_string("lastname", 10), nickname=random_string("nickname", 10),
            companyname=random_string("companyname", 10), address=random_string("address", 25),
            homenumber=random_number(9), mobilenumber=random_number(12), worknumber=random_number(12),
            email=random_email(6), email2=random_email(7), email3=random_email(8),
            birth_date="//div[@id='content']/form/select[1]//option[" + random_date(32) + "]",
            birth_month="//div[@id='content']/form/select[2]//option[" + random_date(13) + "]",
            birth_year=random_number(4),
            anniversary_date="//div[@id='content']/form/select[3]//option[" + random_date(32) + "]",
            notes=random_string("name", 30),
            anniversary_month="//div[@id='content']/form/select[4]//option[" + random_date(13) + "]",
            secondarynumber=random_number(12),
            new_group="//select[@name='new_group']/option[@value='[none]']")
    for i in range(n)]  # BUG FIX: was range(5), silently ignoring the -n option

# Resolve the output path relative to the project root (one level above this script).
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
/model/contact.py
from sys import maxsize


class Contact:
    """Plain data holder for a single address-book contact.

    All fields are optional keyword arguments and default to ``None``;
    ``id`` is kept as a string (or ``None`` before the contact is persisted).
    """

    def __init__(self, firstname=None, middlename=None, lastname=None, nickname=None,
                 companyname=None, address=None, homenumber=None, worknumber=None,
                 mobilenumber=None, faxnumber=None, email=None, email2=None,
                 birth_date=None, birth_month=None, birth_year=None,
                 anniversary_date=None, anniversary_month=None, secondarynumber=None,
                 notes=None, id=None, email3=None, all_phones_from_home_page=None,
                 all_email_from_home_page=None, new_group=None):
        # Copy every keyword argument onto the instance verbatim; snapshotting
        # locals() here captures exactly the parameters (nothing else is bound yet).
        params = dict(locals())
        params.pop('self')
        for attr, value in params.items():
            setattr(self, attr, value)

    def __repr__(self):
        return "{}:{}:{}".format(self.id, self.firstname, self.lastname)

    def __eq__(self, other):
        # A contact with no id yet matches any id (it compares by name only);
        # note this makes equality intentionally asymmetric, as callers rely on.
        id_matches = self.id is None or self.id == other.id
        return id_matches and (self.firstname, self.lastname) == (other.firstname, other.lastname)

    def id_or_max(self):
        """Sort key: numeric id, or sys.maxsize for not-yet-persisted contacts."""
        return int(self.id) if self.id else maxsize
/test/test_add_contact_to_group.py
# -*- coding: utf-8 -*- from model.group import Group from model.contact import Contact from fixture.orm import ORMfixture import random orm = ORMfixture(host="127.0.0.1", name="addressbook", user="root", password="root") def test_add_contact_to_group(app, db): # Проверка на наличие групп if len(db.get_group_list()) == 0: app.group.create(Group(name="For adds contact", header="For adds contact", footer="For adds contact")) # Проверка на наличие свободных контактов if len(db.get_contacts_out_groups()) == 0: app.contact.create(Contact(firstname="Тест_добавления", lastname="Тест_для_добавления", birth_date="//div[@id='content']/form/select[1]//option[1]", birth_month="//div[@id='content']/form/select[2]//option[1]", anniversary_date="//div[@id='content']/form/select[3]//option[1]", anniversary_month="//div[@id='content']/form/select[4]//option[1]", new_group="//select[@name='new_group']/option[@value='[none]']")) contact_list = db.get_contacts_out_groups() contact = random.choice(contact_list) group_list = db.get_group_list() group = random.choice(group_list) app.navigation.turn_to_home_page() app.contact.select_contact_by_id(contact.id) app.group.select_group_by_id_for_add_to(group.id) app.contact.add_contact_to_group() app.navigation.open_group_page_by_id(group.id) # test validation assert contact in list(orm.get_contacts_in_group(group)) assert contact not in list(db.get_contacts_out_groups())
/test/test_contact_data_validation.py
import re
from random import randrange
from model.contact import Contact


def test_random_contact_data_on_home_page(app):
    """One randomly chosen contact's home-page row must match its edit form."""
    rows = app.contact.get_contact_list()
    pos = randrange(len(rows))
    home_row = app.contact.get_contact_list()[pos]
    edit_form = app.contact.get_contact_info_from_edit_page(pos)
    assert home_row.all_phones_from_home_page == merge_phones_like_on_home_page(edit_form)
    assert home_row.all_email_from_home_page == merge_email_like_on_home_page(edit_form)
    assert home_row.firstname == edit_form.firstname
    assert home_row.lastname == edit_form.lastname
    assert home_row.address == edit_form.address


def clear(s):
    """Strip the characters the home page drops from phone numbers."""
    return re.sub("[() -]", "", s)


def merge_phones_like_on_home_page(contact):
    """Join cleaned, non-empty phone fields with newlines, as the home page does."""
    raw = [contact.homenumber, contact.mobilenumber, contact.worknumber, contact.secondarynumber]
    cleaned = [clear(p) for p in raw if p is not None]
    return "\n".join(p for p in cleaned if p != "")


def merge_email_like_on_home_page(contact):
    """Join non-empty e-mail fields with newlines, as the home page does."""
    raw = [contact.email, contact.email2, contact.email3]
    return "\n".join(e for e in raw if e is not None and e != "")


def test_full_contacts_data_on_home_page(app, db):
    """Every UI contact row must match its DB counterpart field by field."""
    total = len(app.contact.get_contact_list())
    contacts_from_db = sorted(list(db.get_contact_list()), key=Contact.id_or_max)
    contacts_from_ui = sorted(list(app.contact.get_contact_list()), key=Contact.id_or_max)
    for pos in range(total):
        ui_row = contacts_from_ui[pos]
        db_row = contacts_from_db[pos]
        assert ui_row.firstname.strip() == db_row.firstname.strip()
        assert ui_row.lastname.strip() == db_row.lastname.strip()
        assert ui_row.address.strip() == db_row.address.strip()
        assert ui_row.all_email_from_home_page == merge_email_like_on_home_page(db_row)
        assert ui_row.all_phones_from_home_page == merge_phones_like_on_home_page(db_row)
/test/test_db_matches_ui.py
from model.group import Group


def test_group_list(app, db):
    """The group list rendered in the UI must match the database contents."""
    from_ui = app.group.get_group_list()
    # Normalise the DB rows the same way the UI helper does before comparing.
    from_db = [app.group.clean(g) for g in db.get_group_list()]
    assert sorted(from_ui, key=Group.id_or_max) == sorted(from_db, key=Group.id_or_max)
/test/test_del_contact.py
# -*- coding: utf-8 -*- from model.contact import Contact import random def test_delete_first_contact(app, db, check_ui): if app.contact.count_contacts() == 0: app.contact.create(Contact(firstname="Тест_имени", lastname="Тест_фамилии", birth_date="//div[@id='content']/form/select[1]//option[1]", birth_month="//div[@id='content']/form/select[2]//option[1]", anniversary_date="//div[@id='content']/form/select[3]//option[1]", anniversary_month="//div[@id='content']/form/select[4]//option[1]")) old_contacts = db.get_contact_list() contact = random.choice(old_contacts) app.contact.delete_contact_by_id(contact.id) # Test validation new_contacts = db.get_contact_list() assert len(old_contacts) - 1 == len(new_contacts) old_contacts.remove(contact) assert old_contacts == new_contacts if check_ui: new_contacts = map(app.contact.clean, db.get_contact_list()) assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
/test/test_del_contact_from_group.py
# -*- coding: utf-8 -*- from model.group import Group from model.contact import Contact from fixture.orm import ORMfixture import random db = ORMfixture(host="127.0.0.1", name="addressbook", user="root", password="") def test_del_contact_from_group(app): # Проверка на наличие групп if len(db.get_group_list()) == 0: app.group.create(Group(name="For adds contact", header="For adds contact", footer="For adds contact")) group_list = db.get_group_list() group = random.choice(group_list) # Проверка на наличие контактов в группе if len(db.get_contacts_in_group(group)) == 0: app.contact.create(Contact(firstname="Тест_добавления", lastname="Тест_для_добавления", birth_date="//div[@id='content']/form/select[1]//option[1]", birth_month="//div[@id='content']/form/select[2]//option[1]", anniversary_date="//div[@id='content']/form/select[3]//option[1]", anniversary_month="//div[@id='content']/form/select[4]//option[1]", new_group="//select[@name='new_group']/option[@value='%s']" % group.id)) app.navigation.open_group_page_by_id(group.id) contacts_list = db.get_contacts_in_group(group) contact = random.choice(contacts_list) app.contact.select_contact_by_id(contact.id) app.contact.delete_contact_from_group() app.navigation.open_group_page_by_id(group.id) # test validation assert contact in list(db.get_contacts_not_in_group(group)) assert contact not in list(db.get_contacts_in_group(group))
/test/test_edit_contact.py
# -*- coding: utf-8 -*- from model.contact import Contact import random def test_edit_contact_by_index(app, db, check_ui): if app.contact.count_contacts() == 0: app.contact.create(Contact(firstname="For modify", birth_date="//div[@id='content']/form/select[1]//option[1]", birth_month="//div[@id='content']/form/select[2]//option[1]", anniversary_date="//div[@id='content']/form/select[3]//option[1]", anniversary_month="//div[@id='content']/form/select[4]//option[1]")) old_contacts = db.get_contact_list() contact = random.choice(old_contacts) input_contact = Contact(firstname="Отредактирован", middlename="Отредактирович", lastname="Отредактированский", nickname="Редактор", companyname='ОАО "Редакция и Мир"', address="редакторский городок", homenumber="567-22-04", worknumber="45+6", email="glavred@mir.ur", notes="Здесь могла бы быть ваша реклама", email2="", birth_date="//div[@id='content']/form/select[1]//option[4]", birth_month="//div[@id='content']/form/select[2]//option[5]", birth_year="", anniversary_date="//div[@id='content']/form/select[3]//option[6]", anniversary_month="//div[@id='content']/form/select[4]//option[7]", mobilenumber="12345678", secondarynumber="(098)76543") input_contact.id = contact.id app.contact.edit_contact_by_id(contact.id, input_contact) # Test validation new_contacts = db.get_contact_list() assert len(old_contacts) == len(new_contacts) idx = int(old_contacts.index(contact)) old_contacts[idx] = input_contact assert old_contacts == new_contacts if check_ui: new_contacts = map(app.contact.clean, db.get_contact_list()) assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
/test/test_edit_group.py
# -*- coding: utf-8 -*-
from model.group import Group
import random


def test_edit_first_group_footer(app, db, check_ui):
    """Overwrite a randomly chosen group with fixed data; verify via the DB."""
    # Precondition: at least one group must exist; create one if needed.
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="For modification", header="For modification", footer="For modification"))
    old_groups = db.get_group_list()
    victim = random.choice(old_groups)
    replacement = Group(name="Modify name", header="Modify header", footer="Modify footer")
    app.group.edit_group_by_id(victim.id, replacement)
    # Test validation
    new_groups = db.get_group_list()
    assert len(old_groups) == len(new_groups)
    # Substituting the edited group into the old snapshot must yield the new state.
    position = int(old_groups.index(victim))
    old_groups[position] = replacement
    assert old_groups == new_groups
    if check_ui:
        # Optional slow path: cross-check the DB state against the rendered UI.
        new_groups = map(app.group.clean, db.get_group_list())
        assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(),
                                                                 key=Group.id_or_max)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
smartgang/KViewer
refs/heads/master
{"/Indexer/__init__.py": ["/Indexer/IndexerWidget.py", "/Indexer/DMI.py", "/Indexer/MACD.py", "/Indexer/KDJ.py", "/Indexer/EMA.py", "/Indexer/HullRsi.py", "/Indexer/ATR.py", "/Indexer/RSI.py"], "/KViewer_new.py": ["/ChildGraph.py"], "/kviewer_app.py": ["/indexer.py"], "/Indexer/ATR.py": ["/Indexer/IndexerBase.py"], "/Indexer/DMI.py": ["/Indexer/IndexerBase.py"], "/Indexer/EMA.py": ["/Indexer/IndexerBase.py"], "/Indexer/HullRsi.py": ["/Indexer/IndexerBase.py"], "/Indexer/KDJ.py": ["/Indexer/IndexerBase.py"], "/Indexer/MACD.py": ["/Indexer/IndexerBase.py"], "/Indexer/RSI.py": ["/Indexer/IndexerBase.py"], "/MainFrame.py": ["/KViewer_new.py"]}
└── ├── ChildGraph.py ├── DataInterface │ └── DataInterface.py ├── Indexer │ ├── ATR.py │ ├── DMI.py │ ├── EMA.py │ ├── HullRsi.py │ ├── IndexerBase.py │ ├── IndexerWidget.py │ ├── KDJ.py │ ├── MACD.py │ ├── RSI.py │ └── __init__.py ├── KViewer_new.py ├── MainFrame.py ├── complex.py ├── complex2.py ├── complexExample.py ├── decouple_window.py ├── indexer.py ├── kviewer1.py ├── kviewer2.py ├── kviewer_app.py ├── nullWindow.py ├── parameter.py ├── parameter2.py └── untitled.py
/ChildGraph.py
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Indexer import *
import pyqtgraph as pg
import pandas as pd


class ChildGraph(QWidget):
    """One chart panel: either the main (candlestick) chart (child=False)
    or a sub-chart showing a single technical indicator (child=True)."""

    # Emitted whenever the main chart's plot widget is rebuilt.
    main_child_plt_changed = pyqtSignal(name='main_child_plt_changed')

    def __init__(self, child=True):
        super(ChildGraph, self).__init__()
        self.child = child
        self.frame_layout = QVBoxLayout(self)
        self.para_setting_btn = QPushButton("参数设置")
        self.para_setting_btn.setFixedWidth(100)
        self.indexer_label = QLabel(self)
        self.vLine = None
        self.frame_layout.addLayout(self.header_layout())
        self.raw_data = None
        self.open_list = []
        # When this panel is the main chart (child=False) these keep raw_data's
        # high/low columns, used to compute the visible Y range.
        self.high_list = []
        self.low_list = []
        self.close_list = []
        self.time_list = []
        self.plt = None
        self.indexer_class = None
        self.indexer_name = ''
        self.indexer_widget = None

    def set_raw_data(self, raw_data):
        # Called externally once the main chart has obtained its data.
        # Stores the data and (re)builds the plot; the main chart also caches
        # the OHLC columns for labels and Y-range computation.
        self.raw_data = raw_data
        if not self.child:
            self.open_list = self.raw_data['open'].tolist()
            self.high_list = self.raw_data['high'].tolist()
            self.low_list = self.raw_data['low'].tolist()
            self.close_list = self.raw_data['close'].tolist()
            self.time_list = self.raw_data['strtime'].tolist()
        self._setup_plt()

    def _setup_candlestick(self):
        # Build the candlestick item and date axis for the main chart.
        csitem = CandlestickItem(self.raw_data)
        axis = DateAxis(date_strings=self.time_list, orientation='bottom')
        return csitem, axis

    def _setup_plt(self):
        # (Re)create the plot widget; closes any previous one first.
        if self.plt:
            self.plt.close()
        if not self.child:
            # Main chart: candlesticks on a date axis.
            item, axis = self._setup_candlestick()
            self.plt = pg.PlotWidget(axisItems={'bottom': axis})
            self.plt.addItem(item, )
            self.plt.showGrid(x=True, y=True)
            self.main_child_plt_changed.emit()
        else:
            self.plt = pg.PlotWidget()
            self.plt.showGrid(x=True, y=True)
        # Vertical cursor line, repositioned by set_indexer_label().
        self.vLine = pg.InfiniteLine(angle=90, movable=False)
        self.plt.addItem(self.vLine)
        self.frame_layout.addWidget(self.plt)

    def header_layout(self):
        # Header row: indicator value label plus the parameter-settings button.
        hbox = QHBoxLayout(self)
        self.para_setting_btn.clicked.connect(self.set_indexer_parameter)
        hbox.addWidget(self.indexer_label)
        hbox.addWidget(self.para_setting_btn)
        return hbox

    def set_indexer_label(self, xpos):
        # Update the indicator value label for position xpos and move the
        # vertical cursor line there.
        if self.indexer_class:
            if xpos >= self.indexer_class.value_num:
                return
            value_str = self.indexer_class.get_indexer_value_text(xpos)
            if not self.child:
                # Main chart additionally shows the OHLC values, coloured by
                # candle direction (green down / red up / black unchanged).
                open = self.open_list[xpos]
                close = self.close_list[xpos]
                if open > close:
                    c = 'green'
                elif open < close:
                    c = 'red'
                else:
                    c = 'black'
                value_str += \
                    " <span style='color: %s'>open=%0.1f,high=%0.1f,low=%0.1f,close=%0.1f</span>,%s" % (
                        c, open, self.high_list[xpos], self.low_list[xpos], close, self.time_list[xpos])
            self.indexer_label.setText(value_str)
            self.vLine.setPos(xpos)

    def set_indexer_parameter(self):
        # User entry point: pop up the indicator-settings dialog.
        # The currently active indicator is pre-loaded with its live parameters;
        # all other indicators show their defaults.
        all_indexer_para_dic = get_all_indexer_para_dic()
        current_indexer_name = 'MA'
        if self.indexer_class:
            all_indexer_para_dic[self.indexer_name] = self.indexer_class.get_para_dic()
            current_indexer_name = self.indexer_class.indexer_name
        self.indexer_widget = IndexerWidget(all_indexer_para_dic, current_indexer_name)
        self.indexer_widget.signal_para_changed.connect(self.indexer_parameter_changed)
        self.indexer_widget.show()

    def indexer_parameter_changed(self, selected_indexer, para_dic):
        # Receive the user's new settings and refresh the display.
        if selected_indexer == self.indexer_name:
            # Same indicator as before: just push the new parameters into it.
            self.indexer_class.update_parameter(para_dic[selected_indexer])
        else:
            # Different indicator selected: rebuild the plot and instantiate it.
            if self.indexer_class:
                #self.plt.clear()
                self._setup_plt()
            indexer_class = indexer_mapping_dic[selected_indexer](self.raw_data, self.plt)
            indexer_class.set_para_dic(para_dic[selected_indexer])
            indexer_class.calculate_indexer_value()
            indexer_class.draw_indexer()
            self.indexer_class = indexer_class
            self.indexer_name = selected_indexer
            # NOTE(review): 200/400 look like a hard-coded initial viewport —
            # confirm whether this should track the current visible range.
            self.update_visual_range(200, 400)
            self.set_indexer_label(200)

    def update_visual_range(self, start_pos, end_pos):
        # Adapt the visible X window to [start_pos, end_pos] and auto-fit Y.
        if self.plt and self.indexer_class:
            value_n = self.indexer_class.value_num
            # Clamp the requested window to the available data range.
            start_pos = max(0, start_pos)
            start_pos = min(start_pos, value_n)
            end_pos = max(1, end_pos)
            end_pos = min(end_pos, value_n)
            if not self.child:
                # Main chart: Y range spans the candles in view.
                minY = min(self.low_list[start_pos:end_pos])
                maxY = max(self.high_list[start_pos:end_pos])
            else:
                # Sub-chart: start from sentinel bounds, indicator extremes win.
                minY = 999999
                maxY = 0
            indexer_max_value, indexer_min_value = self.indexer_class.get_polar_value(start_pos, end_pos)
            minY = min(minY, indexer_min_value)
            maxY = max(maxY, indexer_max_value)
            self.plt.setYRange(minY, maxY)
            self.plt.setXRange(start_pos, end_pos, padding=0)


class DateAxis(pg.AxisItem):
    """X axis that renders integer positions as the corresponding date strings."""

    def __init__(self, date_strings, orientation):
        pg.AxisItem.__init__(self, orientation)
        self.date_strings = date_strings
        self.len = len(self.date_strings)

    def tickStrings(self, values, scale, spacing):
        # Map each tick value to its date string; out-of-range ticks are blank.
        strns = []
        for x in values:
            x1 = int(x)
            if 0 <= x1 < self.len:
                strns.append(self.date_strings[x1])
            else:
                strns.append('')
        return strns


## Create a subclass of GraphicsObject.
## The only required methods are paint() and boundingRect()
## (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
    """Pre-rendered candlestick series for a pyqtgraph plot."""

    def __init__(self, data):
        pg.GraphicsObject.__init__(self)
        t = range(data.shape[0])
        open = data.open.tolist()
        high = data.high.tolist()
        low = data.low.tolist()
        close = data.close.tolist()
        # NOTE(review): self.data is later indexed (self.data[1][0]) and then
        # iterated — this relies on Python 2's zip() returning a list; under
        # Python 3 zip() is a one-shot iterator and this would fail.
        self.data = zip(t, open, close, low, high)  ## data must have fields: time, open, close, min, max
        self.generatePicture()

    def generatePicture(self):
        ## pre-computing a QPicture object allows paint() to run much more quickly,
        ## rather than re-drawing the shapes every time.
        self.picture = QPicture()
        p = QPainter(self.picture)
        p.setPen(pg.mkPen('w'))
        # Candle body half-width: a third of one time step.
        w = (self.data[1][0] - self.data[0][0]) / 3.
        for (t, open, close, min, max) in self.data:
            # Wick from low to high, then the body rectangle coloured by direction.
            p.drawLine(QPointF(t, min), QPointF(t, max))
            if open > close:
                p.setBrush(pg.mkBrush('g'))
            else:
                p.setBrush(pg.mkBrush('r'))
            p.drawRect(QRectF(t - w, open, w * 2, close - open))
        p.end()

    def paint(self, p, *args):
        p.drawPicture(0, 0, self.picture)

    def boundingRect(self):
        ## boundingRect _must_ indicate the entire area that will be drawn on
        ## or else we will get artifacts and possibly crashing.
        ## (in this case, QPicture does all the work of computing the bouning rect for us)
        return QRectF(self.picture.boundingRect())


if __name__ == '__main__':
    app = QApplication(sys.argv)
    demo = ChildGraph(False)
    # demo.update_visual_range(200, 300)
    # NOTE(review): set_raw_data(1) passes an int where a DataFrame is expected;
    # presumably a leftover smoke test — confirm before relying on this block.
    demo.set_raw_data(1)
    demo.show()
    sys.exit(app.exec_())
/DataInterface/DataInterface.py
# -*- coding: utf-8 -*-
import pandas as pd
import time
import os

# Data-collection directory layout (paths decoded to unicode for the
# Chinese-capable Windows filesystem; Python 2 code).
Collection_Path = unicode('D:\\002 MakeLive\DataCollection\\', 'utf-8')
PUBLIC_DATA_PATH = unicode('D:\\002 MakeLive\DataCollection\public data\\', 'utf-8')
RAW_DATA_PATH = unicode('D:\\002 MakeLive\DataCollection\\raw data\\', 'utf-8')
TICKS_DATA_PATH = unicode('D:\\002 MakeLive\DataCollection\\ticks data\\', 'utf-8')
BAR_DATA_PATH = unicode('D:\\002 MakeLive\DataCollection\\bar data\\', 'utf-8')
VOLUME_DATA_PATH = unicode('D:\\002 MakeLive\DataCollection\\volume data\\', 'utf-8')
TICKS_DATA_START_DATE = '2017-8-17'  # inclusive of 2017-08-17
# Date of the last data consolidation, exclusive of that day itself
# (add one day before truncating, otherwise the cut-off is wrong).
LAST_CONCAT_DATA = '2017-10-17'

DATA_TYPE_PUBLIC=1
DATA_TYPE_RAW=2
DATA_TYPE_TICKS=3


def getTradedates(exchangeid='SHFE', startdate='2016-01-01', enddate='2017-12-30'):
    # Return the exchange's trading days in [startdate, enddate).
    # The source file lives in the "public data" folder.
    startutc = float(time.mktime(time.strptime(startdate + ' 00:00:00', "%Y-%m-%d %H:%M:%S")))
    endutc = float(time.mktime(time.strptime(enddate + ' 23:59:59', "%Y-%m-%d %H:%M:%S")))
    tradedatedf = pd.read_csv(PUBLIC_DATA_PATH + 'TradeDates.csv', index_col='exchange_id')
    df = tradedatedf.loc[(tradedatedf['utc_time'] >= startutc) & (tradedatedf['utc_time'] < endutc)]
    df = df.loc[exchangeid, :]
    df.reset_index(inplace=True)
    df.drop('Unnamed: 0', inplace=True, axis=1)
    return df


def generatDailyClose(dailyK):
    '''Build a per-trading-day frame with close, previous close and last utc_time
    from an intraday K-line frame.'''
    dailyK['date'] = dailyK['strtime'].str.slice(0, 10)
    closegrouped = dailyK['close'].groupby(dailyK['date'])
    utcgrouped = dailyK['utc_time'].groupby(dailyK['date'])
    dailyClose = pd.DataFrame(closegrouped.last())
    dailyClose['preclose'] = dailyClose['close'].shift(1).fillna(0)
    dailyClose['utc_time'] = utcgrouped.last()
    return dailyClose


# ---------------------------------------------------------------------------------------------
def getBarData(symbol='SHFE.RB', K_MIN=60, starttime='2017-05-01 00:00:00', endtime='2018-01-01 00:00:00'):
    # Read bar (K-line) data for one symbol/period, filtered to (starttime, endtime).
    filename = BAR_DATA_PATH + symbol + '\\' + symbol + ' ' + str(K_MIN) + '.csv'
    df = pd.read_csv(filename)
    startutc = float(time.mktime(time.strptime(starttime, "%Y-%m-%d %H:%M:%S")))
    endutc = float(time.mktime(time.strptime(endtime, "%Y-%m-%d %H:%M:%S")))
    '''
    df.index=pd.to_datetime(df['utc_time'],unit='s')
    df = df.tz_localize(tz='PRC')
    df=df.truncate(before=startdate)
    '''
    df = df.loc[(df['utc_time'] > startutc) & (df['utc_time'] < endutc)]
    df['Unnamed: 0'] = range(0, df.shape[0])
    # df.drop('Unnamed: 0.1', inplace=True,axis=1)
    df.reset_index(drop=True, inplace=True)
    # print 'get data success '+symbol+str(K_MIN)+startdate
    return df


def getBarBySymbol(domain_symbol, symbol, bar_type, starttime=None, endtime=None):
    # Load bars for a single contract of a dominant-contract series,
    # optionally clipped to [starttime, endtime].
    print (1)
    filename = BAR_DATA_PATH + domain_symbol + '\\' + symbol + ' ' + str(bar_type) + '.csv'
    print (filename)
    df = pd.read_csv(filename)
    print (2)
    print (starttime, endtime)
    if starttime:
        startutc = float(time.mktime(time.strptime(starttime, "%Y-%m-%d %H:%M:%S")))
        df = df.loc[df['utc_time'] >= startutc]
    if endtime:
        endutc = float(time.mktime(time.strptime(endtime, "%Y-%m-%d %H:%M:%S")))
        df = df.loc[df['utc_time'] <= endutc]
    df.reset_index(drop=True, inplace=True)
    return df


def getBarBySymbolList(domain_symbol, symbollist, bar_type, startdate=None, enddate=None, cols=None):
    # Load bars for every contract of a dominant-contract series; returns a dict
    # keyed by contract symbol.
    bardic = {}
    startutc = None
    endutc = None
    if startdate:
        # Keep only rows at/after the requested start date.
        startutc = float(time.mktime(time.strptime(startdate + " 00:00:00", "%Y-%m-%d %H:%M:%S")))
    if enddate:
        # Keep only rows at/before the requested end date.
        endutc = float(time.mktime(time.strptime(enddate + " 23:59:59", "%Y-%m-%d %H:%M:%S")))
    for symbol in symbollist:
        filename = BAR_DATA_PATH + domain_symbol + '\\' + symbol + ' ' + str(bar_type) + '.csv'
        if cols:
            bardf = pd.read_csv(filename)[cols]
        else:
            bardf = pd.read_csv(filename)
        if startutc:
            bardf = bardf.loc[bardf['utc_time'] >= startutc]
        if endutc:
            bardf = bardf.loc[bardf['utc_time'] <= endutc]
        bardic[symbol] = bardf.reset_index(drop=True)
    return bardic


def getBarDicAfterDomain(symbolinfo, bar_type,cols=None):
    # Load bars for every contract of a series, restricted to each contract's
    # own dominant period (reduces total data volume); returns a dict.
    domain_symbol = symbolinfo.domain_symbol
    symbollist = symbolinfo.getSymbolList()
    bardic = {}
    startutc , endutc = symbolinfo.getUtcRange()
    for symbol in symbollist:
        domain_utc_start, domain_utc_end = symbolinfo.getSymbolDomainUtc(symbol)
        filename = BAR_DATA_PATH + domain_symbol + '\\' + symbol + ' ' + str(bar_type) + '.csv'
        if cols:
            bardf = pd.read_csv(filename)[cols]
        else:
            bardf = pd.read_csv(filename)
        # Only keep data from the contract's dominant period onwards.
        bardf = bardf.loc[bardf['utc_time']>=domain_utc_start]
        if startutc:
            bardf = bardf.loc[bardf['utc_time'] >= startutc]
        if endutc:
            bardf = bardf.loc[bardf['utc_time'] <= endutc]
        bardic[symbol] = bardf
    return bardic


def getDomainbarByDomainSymbol(symbollist, bardic, symbolDomaindic):
    # Stitch a continuous dominant-contract series: for each contract, take the
    # slice defined by symbolDomaindic and concatenate them in order.
    # Assumes the symbols on both sides match; no validation is performed.
    domain_bar = pd.DataFrame()
    barlist = []
    #timestart = time.time()
    for symbol in symbollist:
        utcs = symbolDomaindic[symbol]
        bars = bardic[symbol]
        symbol_domain_start = utcs[0]
        symbol_domain_end = utcs[1]
        bar = bars.loc[(bars['utc_time'] >= symbol_domain_start) & (bars['utc_time'] <= symbol_domain_end)]
        #domain_bar = pd.concat([domain_bar, bar])
        #domain_bar = domain_bar.append(bar)
        barlist.append(bar)
    #timebar = time.time()
    #print ("timebar %.3f" % (timebar - timestart))
    domain_bar = pd.concat(barlist)
    #timeconcat = time.time()
    #print ("timeconcat %.3f" % (timeconcat - timebar))
    # Sorting by utc_time would be safer here but was dropped for speed.
    #domain_bar.sort_values('utc_time',inplace=True)
    #timesort = time.time()
    #print ("timesort %.3f" % (timesort - timeconcat))
    domain_bar.reset_index(drop=True, inplace=True)
    #timeindex = time.time()
    #print ("timeindex %.3f" % (timeindex - timeconcat))
    return domain_bar


def getVolumeData(symbol='SHFE.RB', K_MIN=60, starttime='2017-05-01 00:00:00', endtime='2018-01-01 00:00:00'):
    # Read volume bar data, filtered to (starttime, endtime).
    filename = VOLUME_DATA_PATH + symbol + '\\' + symbol + ' ' + str(K_MIN) + '_volume.csv'
    df = pd.read_csv(filename)
    startutc = float(time.mktime(time.strptime(starttime, "%Y-%m-%d %H:%M:%S")))
    endutc = float(time.mktime(time.strptime(endtime, "%Y-%m-%d %H:%M:%S")))
    df = df.loc[(df['utc_time'] > startutc) & (df['utc_time'] < endutc)]
    df['Unnamed: 0'] = range(0, df.shape[0])
    # df.drop('Unnamed: 0.1', inplace=True,axis=1)
    df.reset_index(drop=True, inplace=True)
    # print 'get data success '+symbol+str(K_MIN)+startdate
    return df


def getTickDateBySymbolDate(domain_symbol='SHFE.RB', symbol='RB1810', date='2018-08-09'):
    # Read one trading day's tick data for a specific contract.
    file_name = BAR_DATA_PATH + "%s\\TICK_%s\\Tick_Data_%s_%s.csv" % (domain_symbol, symbol, symbol, date)
    tick_data = pd.read_csv(file_name)
    return tick_data


'''
def getTickData(symbol='SHFE.RB',K_MIN=60,startdate='2017-05-01',enddate='2018-01-01'):
    filename=TICKS_DATA_PATH+symbol+'\\'+symbol+'ticks '+str(K_MIN)+'.csv'
    df=pd.read_csv(filename)
    starttime=startdate+" 00:00:00"
    endtime= enddate+" 00:00:00"
    startutc = float(time.mktime(time.strptime(starttime, "%Y-%m-%d %H:%M:%S")))
    endutc = float(time.mktime(time.strptime(endtime,"%Y-%m-%d %H:%M:%S")))
    df=df.loc[(df['utc_time']>startutc) & (df['utc_time']<endutc)]
    df['Unnamed: 0'] = range(0, df.shape[0])
    df.drop('Unnamed: 0.1.1',drop=True,inplace=True)
    df.reset_index(drop=True,inplace=True)
    #print 'get data success '+symbol+str(K_MIN)+startdate
    return df
'''


def getTickByDate(symbol='SHFE.RB', tradedate='2017-08-07'):
    # Read one trading day's tick file for a symbol.
    filename = TICKS_DATA_PATH + symbol + '\\' + symbol + tradedate + 'ticks.csv'
    df = pd.read_csv(filename)
    return df


def getContractSwaplist(symbol):
    # Read the dominant-contract switch schedule for a symbol.
    datapath = Collection_Path + 'vitualContract\\'
    df = pd.read_csv(datapath + symbol + 'ContractSwap.csv')
    return df


pass


# ----------------------------------------------------------
def getCurrentPath():
    '''
    Return the directory of the current file.
    :return:
    '''
    return os.path.abspath('.')


def getUpperPath(uppernume=1):
    '''
    Return the path `uppernume` levels above the current file.
    :return:
    '''
    p = '/'.join(['..'] * uppernume)
    return os.path.abspath(p)


# -------------------------------------------------------------
def getPriceTick(symbol):
    '''
    Look up the minimum price increment of a symbol.
    :param symbol:
    :return:
    '''
    contract = pd.read_excel(PUBLIC_DATA_PATH + 'Contract.xlsx', index_col='Contract')
    return contract.ix[symbol, 'price_tick']


def getMultiplier(symbol):
    '''
    Look up the contract multiplier of a symbol.
    :param symbol:
    :return:
    '''
    contract = pd.read_excel(PUBLIC_DATA_PATH + 'Contract.xlsx', index_col='Contract')
    return contract.ix[symbol, 'multiplier']


def getMarginRatio(symbol):
    '''
    Look up the margin ratio of a symbol.
    :param symbol:
    :return:
    '''
    contract = pd.read_excel(PUBLIC_DATA_PATH + 'Contract.xlsx', index_col='Contract')
    return contract.ix[symbol, 'margin_ratio']


def getSlip(symbol):
    '''
    Look up the configured slippage of a symbol.
    :param symbol:
    :return:
    '''
    contract = pd.read_excel(PUBLIC_DATA_PATH + 'Contract.xlsx', index_col='Contract')
    return contract.ix[symbol, 'slip']


class SymbolInfo:
    POUNDGE_TYPE_HAND = u'hand'
    POUNDGE_TYPE_RATE = u'rate'
    '''合约信息类'''

    # Contract information for one dominant-contract series: static attributes
    # (tick size, multiplier, fees, ...) plus the dominant-period map of its
    # underlying contracts, optionally restricted to [startdate, enddate].
    def __init__(self, domain_symbol, startdate=None, enddate=None):
        self.domain_symbol = domain_symbol
        contract = pd.read_excel(PUBLIC_DATA_PATH + 'domainMap.xlsx', index_col='symbol')
        contractMapDf = pd.read_csv(PUBLIC_DATA_PATH + 'contractMap.csv', index_col='symbol')
        self.start_utc = None
        self.end_utc = None
        # Contracts belonging to this dominant-contract series.
        self.contractMap = contractMapDf.loc[contractMapDf['domain_symbol'] == domain_symbol]
        if startdate:
            # Drop contracts whose dominant period ended before the start date.
            self.start_utc = float(time.mktime(time.strptime(startdate + " 00:00:00", "%Y-%m-%d %H:%M:%S")))
            self.contractMap = self.contractMap.loc[self.contractMap['domain_end_utc'] > self.start_utc]
        if enddate:
            # Drop contracts whose dominant period starts after the end date.
            self.end_utc = float(time.mktime(time.strptime(enddate + " 23:59:59", "%Y-%m-%d %H:%M:%S")))
            self.contractMap = self.contractMap.loc[self.contractMap['domain_start_utc'] < self.end_utc]
        # Order contracts by the start of their dominant period.
        self.contractMap = self.contractMap.sort_values('domain_start_utc')
        self.active = contract.ix[domain_symbol, 'active']  # active flag
        self.priceTick = contract.ix[domain_symbol, 'price_tick']
        self.multiplier = contract.ix[domain_symbol, 'multiplier']
        self.marginRatio = contract.ix[domain_symbol, 'margin_ratio']
        self.slip = contract.ix[domain_symbol, 'slip']
        self.poundageType = contract.ix[domain_symbol, 'poundage_type']
        self.poundageFee = contract.ix[domain_symbol, 'poundage_fee']
        self.poundageRate = contract.ix[domain_symbol, 'poundage_rate']

    def getPriceTick(self):
        return self.priceTick

    def getMultiplier(self):
        return self.multiplier

    def getMarginRatio(self):
        return self.marginRatio

    def getSlip(self):
        return self.slip

    def getPoundage(self):
        # Fee model: (type, flat fee per hand, proportional rate).
        return self.poundageType, self.poundageFee, self.poundageRate

    def getSymbolList(self):
        return self.contractMap.index.tolist()

    def getSymbolDomainUtc(self, symbol):
        # Dominant period of one contract, as (start_utc, end_utc).
        return self.contractMap.ix[symbol, 'domain_start_utc'], self.contractMap.ix[symbol, 'domain_end_utc']

    def getSymbolDomainTime(self, symbol):
        # Dominant period of one contract, as date strings.
        return self.contractMap.ix[symbol, 'domain_start_date'], self.contractMap.ix[symbol, 'domain_end_date']

    def getSymbolLifeDate(self, symbol):
        # Listing and maturity dates of one contract (its full life cycle).
        return self.contractMap.ix[symbol, 'listed_date'], self.contractMap.ix[symbol, 'maturity_date']

    def getUtcRange(self):
        return self.start_utc, self.end_utc

    def getSymbolDomainDic(self):
        # Map of contract symbol -> [domain_start_utc, domain_end_utc].
        domainDic = {}
        symbolList = self.getSymbolList()
        for symbol in symbolList:
            s, e = self.getSymbolDomainUtc(symbol)
            domainDic[symbol] = [s, e]
        return domainDic

    def amendSymbolDomainDicByOpr(self, oprdf, closeutc_col='closeutc'):
        # Amend the symbol->domain map using an operations frame: when a contract
        # switch happens while positions are still open, the previous contract's
        # effective period extends past its nominal dominant end; adjust for that.
        # Note: some contracts may have no operations at all, so the resulting
        # symbol list can be shorter than oprdf's.
        oprgrouped = oprdf.groupby('symbol')
        symbol_last_utc_list = oprgrouped[closeutc_col].last()
        opr_symbol_list = symbol_last_utc_list.index.tolist()
        symbol_last_utc = None
        domainDic = {}
        symbolList = self.getSymbolList()
        for symbol in symbolList:
            s, e = self.getSymbolDomainUtc(symbol)
            if symbol_last_utc:
                # If the previous contract's last operation ran past its dominant
                # end, start this contract one second after that close so the two
                # periods never overlap in time.
                s = symbol_last_utc + 1
            if symbol in opr_symbol_list:
                symbol_last_utc = symbol_last_utc_list[symbol]
                if symbol_last_utc and symbol_last_utc > e:
                    e = symbol_last_utc
            else:
                symbol_last_utc = None
            domainDic[symbol] = [s, e]
        return domainDic

    def isActive(self):
        return self.active


class TickDataSupplier:
    # Preloads all tick data of a symbol for a date range and serves slices.
    def __init__(self, symbol, startdate, enddate):
        self.startdate = startdate
        self.enddate = enddate
        self.startdateutc = float(time.mktime(time.strptime(startdate + ' 00:00:00', "%Y-%m-%d %H:%M:%S")))
        self.enddateutc = float(time.mktime(time.strptime(enddate + ' 23:59:59', "%Y-%m-%d %H:%M:%S")))
        self.symbol = symbol
        self.exchange, self.secid = symbol.split('.', 1)
        self.datelist = getTradedates(self.exchange, self.startdate, self.enddate)['strtime']
        self.tickdatadf = pd.DataFrame()
        # Eagerly load every trading day's ticks into one frame.
        for d in self.datelist:
            print 'Collecting tick data:', d
            self.tickdatadf = pd.concat([self.tickdatadf, getTickByDate(self.symbol, d)])

    def getTickData(self, starttime, endtime):
        # Slice the preloaded ticks to (starttime, endtime), given as strings.
        startutc = float(time.mktime(time.strptime(starttime, "%Y-%m-%d %H:%M:%S")))
        endutc = float(time.mktime(time.strptime(endtime, "%Y-%m-%d %H:%M:%S")))
        '''
        df.index=pd.to_datetime(df['utc_time'],unit='s')
        df = df.tz_localize(tz='PRC')
        df=df.truncate(before=startdate)
        '''
        df = self.tickdatadf.loc[(self.tickdatadf['utc_time'] > startutc) & (self.tickdatadf['utc_time'] < endutc)]
        df['Unnamed: 0'] = range(0, df.shape[0])
        # df.drop('Unnamed: 0.1.1', inplace=True, axis=1)
        df.reset_index(drop=True, inplace=True)
        return df

    def getTickDataByUtc(self, startutc, endutc):
        # Slice the preloaded ticks to (startutc, endutc), given as timestamps.
        df = self.tickdatadf.loc[(self.tickdatadf['utc_time'] > startutc) & (self.tickdatadf['utc_time'] < endutc)]
        df['Unnamed: 0'] = range(0, df.shape[0])
        # df.drop('Unnamed: 0.1.1', inplace=True, axis=1)
        df.reset_index(drop=True, inplace=True)
        return df

    def getDateRange(self):
        return self.startdate, self.enddate

    def getDateUtcRange(self):
        return self.startdateutc, self.enddateutc

    def getSymbol(self):
        return self.symbol

    def getDateList(self):
        return self.datelist


def symbolInfoTest():
    # Manual smoke test for SymbolInfo and the bar loaders.
    domain_symbol = 'SHFE.RB'
    symbolinfo = SymbolInfo(domain_symbol)
    symbollist = symbolinfo.getSymbolList()
    print symbolinfo.getSymbolDomainDic()
    print symbolinfo.isActive()
    bardic = getBarBySymbolList(domain_symbol, symbollist, 3600)
    for symbol in symbollist:
        print bardic[symbol].head(5)


# ========================================================================================
if __name__ == '__main__':
    # df=getBarData("SHFE.RB",K_MIN=600,starttime='2011-10-08 00:00:00',endtime='2013-03-20 00:00:00')
    # df=getTradedates('SHFE','2017-10-01','2017-12-12')
    # ticksupplier = TickDataSupplier('SHFE.RB', '2017-10-01', '2017-12-10')
    # df1 = ticksupplier.getTickData('2017-10-01 00:00:00', '2017-12-03 22:10:15')
    # print df1.head(10)
    # print df1.tail(10)
    symbolInfoTest()
/Indexer/ATR.py
# -*- coding: utf-8 -*- from IndexerBase import IndexerBase import numpy as np class ATR(IndexerBase): indexer_name = 'ATR' indexer_name_list = ['ATR', 'TR'] default_para_dic = { 'N': 26, } def __init__(self, raw_data, plt): super(ATR, self).__init__(raw_data, plt) self.indexer_name_list = ['ATR', 'TR'] # MA的指标名和参数名都跟参数有关,所以要随参数进行设置 self.indexer_color_dic = { 'ATR': 'blue', 'TR': 'magenta' } def calculate_indexer_value(self): n = self.para_dic['N'] closeshift1 = self.raw_data.close.shift(1).fillna(0) self.raw_data['c'] = self.raw_data.high - self.raw_data.low self.raw_data['d'] = np.abs(self.raw_data.high - closeshift1) self.raw_data['b'] = np.abs(self.raw_data.low - closeshift1) self.raw_data['TR'] = self.raw_data[['c', 'd', 'b']].max(axis=1) self.raw_data.loc[self.raw_data['open'] < self.raw_data['close'], 'TR'] = 0 - self.raw_data['TR'] self.raw_data['ATR'] = np.abs(self.raw_data['TR'].rolling(window=n).mean()) self.indexer_value_dic['TR'] = self.raw_data['TR'].tolist() self.indexer_value_dic['ATR'] = self.raw_data['ATR'].tolist() def draw_indexer(self): i = 0 for indexer_name, values in self.indexer_value_dic.items(): c = self.indexer_color_dic[indexer_name][0] self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c) self.plt_dic[indexer_name].setData(values) i += 1 def re_draw_indexer(self): for pname, values in self.indexer_value_dic.items(): self.plt_dic[pname].setData(values) def get_polar_value(self,start_pos, end_pos): max_v = max(max(self.indexer_value_dic['ATR'][start_pos:end_pos]), max(self.indexer_value_dic['TR'][start_pos:end_pos])) min_v = min(min(self.indexer_value_dic['ATR'][start_pos:end_pos]), min(self.indexer_value_dic['TR'][start_pos:end_pos])) return max_v, min_v
/Indexer/DMI.py
# -*- coding: utf-8 -*-
from IndexerBase import IndexerBase
import numpy as np
import pandas as pd


class DMI(IndexerBase):
    """Directional Movement Index: PDI/MDI plus the smoothed ADX/ADXR lines."""

    indexer_name = 'DMI'
    indexer_name_list = ['PDI', 'MDI', 'ADX', 'ADXR']
    default_para_dic = {
        'N': 14,
        'M': 6,
    }

    def __init__(self, raw_data, plt):
        super(DMI, self).__init__(raw_data, plt)
        # Fixed sub-indicator names (unlike MA, they do not depend on parameters).
        self.indexer_name_list = ['PDI', 'MDI', 'ADX', 'ADXR']
        self.indexer_color_dic = {
            'PDI': 'blue',
            'MDI': 'magenta',
            'ADX': 'cyan',
            'ADXR': 'green'
        }

    def calculate_indexer_value(self):
        """Compute PDI/MDI/ADX/ADXR into indexer_value_dic.

        N is the true-range / directional-movement summation window,
        M the ADX smoothing window. (Stray debug prints removed.)
        """
        n = self.para_dic['N']
        m = self.para_dic['M']
        high = self.raw_data.high
        low = self.raw_data.low
        close = self.raw_data.close
        closeshift1 = close.shift(1).fillna(0)
        # True range: max(high-low, high-prev_close, |low-prev_close|), summed over N bars.
        c = high - low
        d = high - closeshift1
        df1 = pd.DataFrame({'c': c, 'd': d})
        df1['A'] = df1.max(axis=1)
        df1.drop('c', axis=1, inplace=True)
        df1.drop('d', axis=1, inplace=True)
        df1['B'] = np.abs(low - closeshift1)
        df1['C'] = df1.max(axis=1)
        df1['TR'] = df1['C'].rolling(n).sum()
        HD = high - high.shift(1).fillna(0)
        LD = low.shift(1).fillna(0) - low
        # Directional movement: on each bar only the dominant positive move counts.
        df2 = pd.DataFrame({'HD': HD, 'LD': LD})
        df2['DMP_1'] = df2[(df2['HD'] > df2['LD']) & (df2['HD'] > 0)]['HD']
        df2['DMM_1'] = df2[(df2['LD'] > df2['HD']) & (df2['LD'] > 0)]['LD']
        df2 = df2.fillna(0)
        df1['DMP'] = df2['DMP_1'].rolling(n).sum()
        df1['DMM'] = df2['DMM_1'].rolling(n).sum()
        del df2
        df1['PDI'] = df1['DMP'] * 100 / df1['TR']
        df1['MDI'] = df1['DMM'] * 100 / df1['TR']
        adx = np.abs(df1['MDI'] - df1['PDI']) / (df1['MDI'] + df1['PDI']) * 100
        df1['ADX'] = adx.rolling(m).mean()
        df1['ADXR'] = (df1['ADX'] + df1['ADX'].shift(m).fillna(0)) / 2
        self.indexer_value_dic['PDI'] = df1['PDI'].tolist()
        self.indexer_value_dic['MDI'] = df1['MDI'].tolist()
        self.indexer_value_dic['ADX'] = df1['ADX'].tolist()
        self.indexer_value_dic['ADXR'] = df1['ADXR'].tolist()

    def draw_indexer(self):
        """Create one curve per sub-indicator on the target plot."""
        for indexer_name, values in self.indexer_value_dic.items():
            c = self.indexer_color_dic[indexer_name][0]
            self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c)
            self.plt_dic[indexer_name].setData(values)

    def re_draw_indexer(self):
        """Push recalculated values into the existing curves."""
        for pname, values in self.indexer_value_dic.items():
            self.plt_dic[pname].setData(values)

    def get_polar_value(self, start_pos, end_pos):
        """Return (max, min) over all four lines within [start_pos, end_pos)."""
        max_v = max(max(self.indexer_value_dic['PDI'][start_pos:end_pos]),
                    max(self.indexer_value_dic['MDI'][start_pos:end_pos]),
                    max(self.indexer_value_dic['ADX'][start_pos:end_pos]),
                    max(self.indexer_value_dic['ADXR'][start_pos:end_pos]))
        min_v = min(min(self.indexer_value_dic['PDI'][start_pos:end_pos]),
                    min(self.indexer_value_dic['MDI'][start_pos:end_pos]),
                    min(self.indexer_value_dic['ADX'][start_pos:end_pos]),
                    min(self.indexer_value_dic['ADXR'][start_pos:end_pos]))
        return max_v, min_v
/Indexer/EMA.py
# -*- coding: utf-8 -*-
from IndexerBase import IndexerBase


class EMA(IndexerBase):
    """Exponential moving averages; one line per configured span (N1..N5)."""

    indexer_name = 'EMA'
    # Line names depend on the parameter values, so they are rebuilt at runtime.
    indexer_name_list = []
    default_para_dic = {
        'N1': 5,
        'N2': 10,
        'N3': 15,
        'N4': 30,
        'N5': 50
    }

    def __init__(self, raw_data, plt):
        self.indexer_name_list = ["EMA%d" % span for span in self.default_para_dic.values()]
        super(EMA, self).__init__(raw_data, plt)
        self.indexer_color_dic = {
            'N1': 'blue',
            'N2': 'magenta',
            'N3': 'cyan',
            'N4': 'red',
            'N5': 'green'
        }

    def calculate_indexer_value(self):
        """Recompute every EMA line; names and values are rebuilt from the current spans."""
        self.indexer_name_list = []
        self.indexer_value_dic = {}
        for span in self.para_dic.values():
            line_name = "EMA%d" % span
            self.indexer_name_list.append(line_name)
            self.indexer_value_dic[line_name] = self.raw_data['close'].ewm(span=span, adjust=False).mean().tolist()

    def draw_indexer(self):
        """Create one curve per parameter; curves are keyed by the parameter name."""
        for pname, span in self.para_dic.items():
            curve = self.plt.plot(name=pname, pen=self.indexer_color_dic[pname][0])
            curve.setData(self.indexer_value_dic["EMA%d" % span])
            self.plt_dic[pname] = curve

    def re_draw_indexer(self):
        """Push recalculated values into the existing curves."""
        for pname, span in self.para_dic.items():
            self.plt_dic[pname].setData(self.indexer_value_dic["EMA%d" % span])

    def get_polar_value(self, start_pos, end_pos):
        """Return (max, min) across every EMA line within [start_pos, end_pos)."""
        max_v = 0
        min_v = 99999
        for series in self.indexer_value_dic.values():
            segment = series[start_pos:end_pos]
            max_v = max(max_v, max(segment))
            min_v = min(min_v, min(segment))
        return max_v, min_v

    def get_indexer_value_text(self, pos):
        """Build the HTML legend string for the EMA values at index ``pos``."""
        text = self.indexer_name + '('
        for para_name, para_value in self.para_dic.items():
            text += '%s_%d ' % (para_name, para_value)
        text += ')'
        for para_name, para_value in self.para_dic.items():
            line_name = 'EMA%d' % para_value
            colour = self.indexer_color_dic[para_name]
            text += "<span style='color: %s'>%s=%0.2f </span>" % (colour, line_name, self.indexer_value_dic[line_name][pos])
        return text
/Indexer/HullRsi.py
# -*- coding: utf-8 -*- from IndexerBase import IndexerBase import numpy as np import talib class HULL_RSI(IndexerBase): indexer_name = 'HULL_RSI' indexer_name_list = ['RSI'] default_para_dic = { 'N1': 5, 'M1': 5, 'M2': 9, 'N': 8 } def __init__(self, raw_data, plt): super(HULL_RSI, self).__init__(raw_data, plt) self.indexer_name_list = ['RSI'] # MA的指标名和参数名都跟参数有关,所以要随参数进行设置 self.indexer_color_dic = { 'RSI': 'blue' } def calculate_indexer_value(self): n1 = self.para_dic['N1'] m1 = self.para_dic['M1'] m2 = self.para_dic['M2'] n = self.para_dic['N'] close_array = np.array(self.raw_data['close'].values, dtype='float') n = float(n) rsi_data = talib.RSI(close_array, n1) rsi_ema1 = talib.EMA(rsi_data, m1) rsi_ema2 = talib.EMA(rsi_ema1, m2) rsi_new = rsi_ema1 - rsi_ema2 n_2 = round(n / 2, 0) n_squr = round(np.sqrt(n), 0) wma1 = talib.MA(rsi_new, n, matype=2) wma2 = talib.MA(rsi_new, n_2, matype=2) x = wma2 * 2 - wma1 hull_ma = talib.MA(x, n_squr, matype=2) self.indexer_value_dic['RSI'] = hull_ma def draw_indexer(self): i = 0 for indexer_name, values in self.indexer_value_dic.items(): c = self.indexer_color_dic[indexer_name][0] self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c) self.plt_dic[indexer_name].setData(values) i += 1 def re_draw_indexer(self): for pname, values in self.indexer_value_dic.items(): self.plt_dic[pname].setData(values) def get_polar_value(self,start_pos, end_pos): max_v = max(self.indexer_value_dic['RSI'][start_pos:end_pos]) min_v = min(self.indexer_value_dic['RSI'][start_pos:end_pos]) return max_v, min_v
/Indexer/IndexerBase.py
# -*- coding: utf-8 -*- """ 指标类,用于管理指标相内容: """ class IndexerBase(object): color_list = ['blue', 'magenta', 'cyan', 'red', 'green'] indexer_name = '' indexer_name_list = [] default_para_dic = {} para_dic = {} indexer_color_dic = {} # 各指标对应的显示颜色 def __init__(self, raw_data, plt): self.raw_data = raw_data self.para_dic = {} for para_name, value in self.default_para_dic.items(): self.para_dic[para_name] = value self.indexer_value_dic = {} self.plt = plt self.plt_dic = {} self.value_num = self.raw_data.shape[0] #self.calculate_indexer_value() #self.draw_indexer() pass def calculate_indexer_value(self): pass def draw_indexer(self): pass def re_draw_indexer(self): pass def get_polar_value(self, start_pos, end_pos): pass def close_sub_plt(self): for plt in self.plt_dic.values(): plt.close() def set_para_dic(self,para_dic): for para_name in self.para_dic.keys(): self.para_dic[para_name] = para_dic[para_name] def get_para_dic(self): return self.para_dic def update_raw_data(self,raw_data): self.raw_data = raw_data self.calculate_indexer_value() self.re_draw_indexer() def update_parameter(self, para_dic): changed = False for para_name in self.default_para_dic.keys(): if self.para_dic[para_name] != para_dic[para_name]: self.para_dic[para_name] = para_dic[para_name] changed = True if changed: self.calculate_indexer_value() self.re_draw_indexer() def get_indexer_value_text(self, pos): # 根据传入的位置返回一个指标值的字符串 t = self.indexer_name + '(' for para_name, para_value in self.para_dic.items(): t += '%s_%d ' % (para_name, para_value) t += ')' i = 0 for para_name, c in self.indexer_color_dic.items(): t += "<span style='color: %s'>%s=%0.2f </span>" % (c, para_name, self.indexer_value_dic[para_name][pos]) i += 1 return t
/Indexer/IndexerWidget.py
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Indexer import *


class IndexerWidget(QWidget):
    """Dialog for editing the parameters of every registered indexer.

    Emits ``signal_para_changed(selected_indexer_name, all_para_dic)`` when OK
    is pressed, then closes itself.
    """

    signal_para_changed = pyqtSignal(str, dict, name='para_changed')

    def __init__(self, all_indexer_para_dic, current_indexer_name):
        """all_indexer_para_dic: {indexer_name: {param_name: value}};
        current_indexer_name: indexer page to pre-select in the list."""
        super(IndexerWidget, self).__init__()
        self.setWindowTitle('设置指标参数')
        self.indexer_para_dic = all_indexer_para_dic
        self.leftlist = QListWidget()
        self.para_line_edit_dic = {}  # {indexer: {param: QLineEdit}}
        self.stack_dic = {}
        self.indexer_pos_dic = {}  # position of each indexer inside leftlist
        i = 0
        current_indexer_pos = 0
        self.stack = QStackedWidget(self)
        for indexer_name in self.indexer_para_dic.keys():
            self.leftlist.insertItem(i, indexer_name)
            stack_widget = QWidget()
            layout = QFormLayout()
            indexer_para_dic = self.indexer_para_dic[indexer_name]
            line_edit_dic = {}
            for name, value in indexer_para_dic.items():
                le = QLineEdit()
                le.setValidator(QIntValidator())
                le.setMaxLength(2)
                le.setText(str(value))
                line_edit_dic[name] = le
                layout.addRow(name, le)
            self.para_line_edit_dic[indexer_name] = line_edit_dic
            stack_widget.setLayout(layout)
            self.stack_dic[indexer_name] = stack_widget
            self.stack.addWidget(stack_widget)
            if indexer_name == current_indexer_name:
                current_indexer_pos = i
            i += 1
        # QVBoxLayout(self) already installs main_box as this widget's layout.
        main_box = QVBoxLayout(self)
        # Child layouts must be parentless until added, otherwise Qt warns that
        # the widget already has a layout (the original parented them to self).
        hbox = QHBoxLayout()
        hbox.addWidget(self.leftlist)
        hbox.addWidget(self.stack)
        main_box.addLayout(hbox)
        btn_layout = self.setup_button()
        main_box.addLayout(btn_layout)
        # BUG FIX: the original called self.setLayout(btn_layout) here, trying
        # to replace the already-installed main_box with just the button row.
        self.leftlist.currentRowChanged.connect(self.display)
        self.leftlist.setCurrentRow(current_indexer_pos)

    def setup_button(self):
        """Build and return the OK / Cancel button row."""
        row = QHBoxLayout()
        btn_ok = QPushButton('OK')
        btn_cancel = QPushButton('Cancel')  # fixed label typo 'Cancle'
        btn_ok.clicked.connect(self.get_user_para)
        btn_cancel.clicked.connect(self.close)
        row.addWidget(btn_ok)
        row.addWidget(btn_cancel)
        return row

    def get_user_para(self):
        """Collect every edited parameter, emit it with the selected indexer, and close."""
        all_para_dic = {}
        for indexer_name, line_edit_dic in self.para_line_edit_dic.items():
            para_dic = {}
            for para_name, line_edit in line_edit_dic.items():
                para_dic[para_name] = int(line_edit.text())
            all_para_dic[indexer_name] = para_dic
        selected_indexer = self.leftlist.currentItem().text()
        self.signal_para_changed.emit(selected_indexer, all_para_dic)
        self.close()

    def display(self, i):
        """Show the parameter page matching the selected list row."""
        self.stack.setCurrentIndex(i)


class test1():
    """Tiny receiver used for manually exercising signal_para_changed."""

    def __init__(self, name):
        self.name = name

    def receive_para_changed(self, selected, para_dic):
        # Renamed parameter: the original shadowed the builtin ``dict``.
        print (self.name, selected, para_dic)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    all_indexer_para_dic = get_all_indexer_para_dic()
    # BUG FIX: IndexerWidget requires the currently selected indexer name;
    # the original one-argument call raised TypeError at startup.
    demo = IndexerWidget(all_indexer_para_dic, 'MA')
    c1 = test1('test1')
    demo.signal_para_changed.connect(c1.receive_para_changed)
    demo.show()
    sys.exit(app.exec_())
/Indexer/KDJ.py
# -*- coding: utf-8 -*- from IndexerBase import IndexerBase import numpy as np import talib class KDJ(IndexerBase): indexer_name = 'KDJ' indexer_name_list = ['K', 'D', 'J'] default_para_dic = { 'N': 9, 'M1': 3, 'M2': 3 } def __init__(self, raw_data, plt): super(KDJ, self).__init__(raw_data, plt) self.indexer_name_list = ['K', 'D', 'J'] # MA的指标名和参数名都跟参数有关,所以要随参数进行设置 self.indexer_color_dic = { 'K': 'blue', 'D': 'magenta', 'J': 'cyan' } def calculate_indexer_value(self): n = self.para_dic['N'] m1 = self.para_dic['M1'] m2 = self.para_dic['M2'] low_list = self.raw_data['low'].rolling(n).min().fillna(self.raw_data['low']) # 使用low的值来填充前面的空白 high_list = self.raw_data['high'].rolling(n).max().fillna(self.raw_data['high']) # 使用high来填充 rsv = (self.raw_data['close'] - low_list) / (high_list - low_list) * 100 a = 1.0/m1 a2 = 1.0/m2 kdj_k = rsv.ewm(alpha=a, adjust=False).mean() kdj_d = kdj_k.ewm(alpha=a2, adjust=False).mean() kdj_j = 3 * kdj_k - 2 * kdj_d self.indexer_value_dic['K'] = kdj_k self.indexer_value_dic['D'] = kdj_d self.indexer_value_dic['J'] = kdj_j def draw_indexer(self): i = 0 for indexer_name, values in self.indexer_value_dic.items(): c = self.indexer_color_dic[indexer_name][0] self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c) self.plt_dic[indexer_name].setData(values) i += 1 def re_draw_indexer(self): for pname, values in self.indexer_value_dic.items(): self.plt_dic[pname].setData(values) def get_polar_value(self,start_pos, end_pos): max_v = max(max(self.indexer_value_dic['K'][start_pos:end_pos]), max(self.indexer_value_dic['D'][start_pos:end_pos]), max(self.indexer_value_dic['J'][start_pos:end_pos])) min_v = min(min(self.indexer_value_dic['K'][start_pos:end_pos]), min(self.indexer_value_dic['D'][start_pos:end_pos]), min(self.indexer_value_dic['J'][start_pos:end_pos])) return max_v, min_v
/Indexer/MACD.py
# -*- coding: utf-8 -*-
from IndexerBase import IndexerBase
import pyqtgraph as pg


class MACD(IndexerBase):
    """MACD indicator: DIF/DEA curves plus a red/green HIST bar chart."""

    indexer_name = 'MACD'
    indexer_name_list = ['DIF', 'DEA', 'HIST']
    default_para_dic = {
        'Short': 12,
        'Mid': 9,
        'Long': 26
    }

    def __init__(self, raw_data, plt):
        super(MACD, self).__init__(raw_data, plt)
        # Fixed sub-indicator names (they do not depend on the parameters).
        self.indexer_name_list = ['DIF', 'DEA', 'HIST']
        self.hist_item_up = None
        self.hist_item_down = None
        self.indexer_color_dic = {
            'DIF': 'blue',
            'DEA': 'magenta',
            'HIST': 'red'
        }

    def calculate_indexer_value(self):
        """DIF = EMA(Short) - EMA(Long); DEA = EMA(DIF, Mid); HIST = 2*(DIF-DEA)."""
        closedata = self.raw_data['close']
        short = self.para_dic['Short']
        long1 = self.para_dic['Long']
        mid = self.para_dic['Mid']
        sema = closedata.ewm(span=short, adjust=False).mean()
        lema = closedata.ewm(span=long1, adjust=False).mean()
        data_dif = sema - lema
        data_dea = data_dif.ewm(span=mid, adjust=False).mean()
        data_bar = (data_dif - data_dea) * 2
        self.indexer_value_dic['DIF'] = data_dif.tolist()
        self.indexer_value_dic['DEA'] = data_dea.tolist()
        self.indexer_value_dic['HIST'] = data_bar.tolist()

    def _make_hist_items(self, values):
        """Split the histogram into positive (red) and negative (green) bar items.

        Extracted helper: this construction was duplicated verbatim in
        draw_indexer and re_draw_indexer.
        """
        up_num, up_value = [], []
        down_num, down_value = [], []
        for n, v in enumerate(values):
            if v >= 0:
                up_num.append(n)
                up_value.append(v)
            else:
                down_num.append(n)
                down_value.append(v)
        up_item = pg.BarGraphItem(x=up_num, height=up_value, width=0.3, brush='r')
        down_item = pg.BarGraphItem(x=down_num, height=down_value, width=0.3, brush='g')
        return up_item, down_item

    def draw_indexer(self):
        """Create the DIF/DEA curves and the HIST bar items on the target plot."""
        for indexer_name, values in self.indexer_value_dic.items():
            if indexer_name == 'HIST':
                self.hist_item_up, self.hist_item_down = self._make_hist_items(values)
                self.plt.addItem(self.hist_item_up)
                self.plt.addItem(self.hist_item_down)
            else:
                c = self.indexer_color_dic[indexer_name][0]
                self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c)
                self.plt_dic[indexer_name].setData(values)

    def re_draw_indexer(self):
        """Update the curves in place; the bar items are rebuilt from scratch."""
        for pname, values in self.indexer_value_dic.items():
            if pname == 'HIST':
                self.plt.removeItem(self.hist_item_up)
                self.plt.removeItem(self.hist_item_down)
                self.hist_item_up, self.hist_item_down = self._make_hist_items(values)
                self.plt.addItem(self.hist_item_up)
                self.plt.addItem(self.hist_item_down)
            else:
                self.plt_dic[pname].setData(values)

    def get_polar_value(self, start_pos, end_pos):
        """Return (max, min) over DIF, DEA and HIST within [start_pos, end_pos)."""
        max_v = max(max(self.indexer_value_dic['DIF'][start_pos:end_pos]),
                    max(self.indexer_value_dic['DEA'][start_pos:end_pos]),
                    max(self.indexer_value_dic['HIST'][start_pos:end_pos]))
        min_v = min(min(self.indexer_value_dic['DIF'][start_pos:end_pos]),
                    min(self.indexer_value_dic['DEA'][start_pos:end_pos]),
                    min(self.indexer_value_dic['HIST'][start_pos:end_pos]))
        return max_v, min_v
/Indexer/RSI.py
# -*- coding: utf-8 -*- from IndexerBase import IndexerBase import numpy as np import talib class RSI(IndexerBase): indexer_name = 'RSI' indexer_name_list = ['RSI'] default_para_dic = { 'N': 5, } def __init__(self, raw_data, plt): super(RSI, self).__init__(raw_data, plt) self.indexer_name_list = ['RSI'] # MA的指标名和参数名都跟参数有关,所以要随参数进行设置 self.indexer_color_dic = { 'RSI': 'blue' } def calculate_indexer_value(self): n = self.para_dic['N'] close_array = np.array(self.raw_data['close'].values, dtype='float') self.indexer_value_dic['RSI'] = talib.RSI(close_array, n) def draw_indexer(self): i = 0 for indexer_name, values in self.indexer_value_dic.items(): c = self.indexer_color_dic[indexer_name][0] self.plt_dic[indexer_name] = self.plt.plot(name=indexer_name, pen=c) self.plt_dic[indexer_name].setData(values) i += 1 def re_draw_indexer(self): for pname, values in self.indexer_value_dic.items(): self.plt_dic[pname].setData(values) def get_polar_value(self,start_pos, end_pos): max_v = max(self.indexer_value_dic['RSI'][start_pos:end_pos]) min_v = min(self.indexer_value_dic['RSI'][start_pos:end_pos]) return max_v, min_v
/Indexer/__init__.py
# -*- coding: utf-8 -*- from IndexerWidget import IndexerWidget from MA import MA from MACD import MACD from RSI import RSI from ATR import ATR from HullMacd import HULL_MACD from EMA import EMA from HullRsi import HULL_RSI from KDJ import KDJ from DMI import DMI indexer_mapping_dic = { 'MA': MA, 'EMA': EMA, 'MACD': MACD, 'HULL_MACD': HULL_MACD, 'RSI': RSI, 'ATR': ATR, 'HULL_RSI': HULL_RSI, 'KDJ': KDJ, 'DMI': DMI } def get_all_indexer_para_name(): result_dic = {} for indexer_name, indexer in indexer_mapping_dic.items(): result_dic[indexer_name] = indexer.default_para_dic.keys() return result_dic def get_all_indexer_para_dic(): result_dic = {} for indexer_name, indexer in indexer_mapping_dic.items(): result_dic[indexer_name] = indexer.default_para_dic return result_dic
/KViewer_new.py
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from Indexer import *
import pyqtgraph as pg
import pandas as pd
from ChildGraph import ChildGraph
import DataInterface.DataInterface as DI


class KViewer(QWidget):
    """K-line viewer page: a settings row on top, one main chart, a list of
    indicator sub-charts, and an overview strip with a draggable range box."""

    def __init__(self,):
        super(KViewer, self).__init__()
        # Bar DataFrame; stays None until the user presses the show button.
        self.raw_data = None
        self.main_layout = QVBoxLayout(self)
        self.setting_layout = QHBoxLayout(self)
        self.child_graph_layout = QVBoxLayout(self)
        # Draggable region in the overview plot selecting the visible bar range.
        self.region = pg.LinearRegionItem()
        self.region_minx = 0  # extent of the region control box
        self.region_maxx = 0
        self.range_control_plt = pg.PlotWidget()
        self.setting_view_btn = QPushButton('显示')
        self.setting_add_child_btn = QPushButton('增加子图')
        self.setting_remove_child_btn = QPushButton('删除子图')
        self.setting_end_date = QDateEdit()
        self.setting_start_date = QDateEdit()
        self.setting_bar_type_cb = QComboBox()
        self.setting_symbol_edit = QLineEdit()  # product
        self.setting_exchange_cb = QComboBox()
        #self.setting_exchange_edit = QLineEdit()  # exchange
        self.setting_contract_edit = QLineEdit()  # contract
        self.setting_dic = {}
        self.setup_ui()
        self.setup_range_control_view()
        self.main_child_graph = ChildGraph(False)
        self.main_child_graph.main_child_plt_changed.connect(self.main_child_plt_changed)
        self.child_graph_list = []
        second_child_graph = ChildGraph(True)
        self.child_graph_list.append(second_child_graph)
        self.child_graph_layout.addWidget(self.main_child_graph,stretch=2)
        self.child_graph_layout.addWidget(second_child_graph, stretch=2)
        self.child_graph_layout.addWidget(self.range_control_plt, stretch=1)
        self.main_layout.addLayout(self.setting_layout)
        self.main_layout.addLayout(self.child_graph_layout)
        self.setLayout(self.main_layout)

    def setup_ui(self):
        self.setup_ui_header()

    def setup_ui_header(self):
        # Build the settings row: exchange / product / contract / period /
        # date range / show / add-subchart / remove-subchart.
        self.setting_layout.addWidget(QLabel('交易所'))
        self.setting_exchange_cb.addItems(['SHFE', 'DCE', 'CZCE', 'CFFEX'])
        self.setting_layout.addWidget(self.setting_exchange_cb)
        #self.setting_layout.addWidget(self.setting_exchange_edit)
        self.setting_layout.addWidget(QLabel('品种'))
        self.setting_layout.addWidget(self.setting_symbol_edit)
        self.setting_layout.addWidget(QLabel('合约'))
        self.setting_layout.addWidget(self.setting_contract_edit)
        self.setting_layout.addWidget(QLabel('周期'))
        self.setting_bar_type_cb.addItems(['0','60','300','600','900','1800','3600'])
        self.setting_layout.addWidget(self.setting_bar_type_cb)
        self.setting_start_date.setDisplayFormat("yyyy-MM-dd")
        self.setting_end_date.setDisplayFormat("yyyy-MM-dd")
        self.setting_layout.addWidget(QLabel('开始日期'))
        self.setting_layout.addWidget(self.setting_start_date)
        self.setting_layout.addWidget(QLabel('结束日期'))
        self.setting_layout.addWidget(self.setting_end_date)
        self.setting_view_btn.clicked.connect(self.get_setting)
        self.setting_layout.addWidget(self.setting_view_btn)
        self.setting_layout.addWidget(self.setting_add_child_btn)
        self.setting_layout.addWidget(self.setting_remove_child_btn)
        self.setting_add_child_btn.clicked.connect(self.add_child_graph)
        self.setting_remove_child_btn.clicked.connect(self.remove_child_graph)

    def setup_range_control_view(self):
        # Range-selection box of the overview plot below the charts.
        self.region.setZValue(10)
        self.range_control_plt.addItem(self.region)
        #self.range_control_plt.plot(x=x,y=y, pen="w", name='close')

    def get_setting(self):
        # Read the current widget values into setting_dic, then (re)load the charts.
        exchange = self.setting_exchange_cb.currentText()
        symbol = self.setting_symbol_edit.text()
        contract = self.setting_contract_edit.text()
        bar_type = int(self.setting_bar_type_cb.currentText())
        start_date = self.setting_start_date.date().toString("yyyy-MM-dd")
        end_date = self.setting_end_date.date().toString("yyyy-MM-dd")
        self.setting_dic['exchange'] = exchange
        self.setting_dic['symbol'] = symbol
        self.setting_dic['contract'] = contract
        self.setting_dic['period'] = bar_type
        self.setting_dic['start_date'] = start_date
        self.setting_dic['end_date'] = end_date
        self.setup_child_graph()

    def setup_child_graph(self):
        # Fetch bar data for the configured contract and feed it to every child graph.
        domain_symbol = '.'.join([self.setting_dic['exchange'], self.setting_dic['symbol']])
        contract = self.setting_dic['contract']
        bar_type = self.setting_dic['period']
        start_date = self.setting_dic['start_date']
        end_date = self.setting_dic['end_date']
        self.raw_data = DI.getBarBySymbol(domain_symbol, contract, bar_type, start_date + ' 09:00:00', end_date + ' 15:00:00')
        #self.raw_data = pd.read_excel('RB1810_2018-06-19_1m.xlsx')
        self.main_child_graph.set_raw_data(self.raw_data)
        for second_child_graph in self.child_graph_list:
            second_child_graph.set_raw_data(self.raw_data)
        # Overview strip shows the close series; the region drives the zoom.
        self.range_control_plt.plot(self.raw_data['close'], pen="w", name='close')
        self.region.sigRegionChanged.connect(self.set_child_range)
        self.region.setRegion([0, 100])
        pass

    def set_child_range(self):
        # Propagate the overview region to every child graph's visible range.
        #self.region.setZValue(10)
        minX, maxX = self.region.getRegion()
        self.main_child_graph.update_visual_range(int(minX), int(maxX))
        for second_child_graph in self.child_graph_list:
            second_child_graph.update_visual_range(int(minX), int(maxX))

    def update_region(self,window, viewRange):
        # Keep the overview region in sync when the main chart is panned/zoomed.
        rgn = viewRange[0]
        self.region.setRegion(rgn)
        self.region_minx, self.region_maxx = self.region.getRegion()

    def main_child_plt_changed(self):
        # The main chart (re)created its plot: rewire range-sync and mouse tracking.
        self.main_child_graph.plt.sigRangeChanged.connect(self.update_region)
        self.proxy = pg.SignalProxy(self.main_child_graph.plt.scene().sigMouseMoved, rateLimit=60, slot=self.mouseMoved)

    def mouseMoved(self, event):
        pos = event[0]  ## using signal proxy turns original arguments into a tuple
        if self.main_child_graph.plt.sceneBoundingRect().contains(pos):
            a = self.main_child_graph.plt.boundingRect().getRect()
            knum = self.region_maxx - self.region_minx
            # (pos.x()-35) is the mouse offset from the left border;
            # (a[2]-35)/knum is the pixel width of one K-line bar;
            # dividing the two gives the bar index under the cursor, and adding
            # region_minx converts it to an index into the full data list.
            # NOTE(review): the 35-pixel margin appears to be a hard-coded axis
            # width -- confirm it matches the actual plot margins.
            rx = int((pos.x()-35)/((a[2]-35)/knum)+ self.region_minx)
            index = rx
            #if index > 0 and index < len(self.t):
            self.main_child_graph.set_indexer_label(index)
            for second_child_graph in self.child_graph_list:
                second_child_graph.set_indexer_label(index)

    def add_child_graph(self):
        # Append a new indicator sub-chart just above the overview strip.
        second_child_graph = ChildGraph(True)
        self.child_graph_list.append(second_child_graph)
        if self.raw_data is not None:
            second_child_graph.set_raw_data(self.raw_data)
        self.child_graph_layout.insertWidget(len(self.child_graph_list), second_child_graph, stretch=2)

    def remove_child_graph(self):
        # Remove the most recently added sub-chart, if any.
        if self.child_graph_list:
            self.child_graph_layout.removeWidget(self.child_graph_list[-1])
            self.child_graph_list[-1].deleteLater()
            del self.child_graph_list[-1]
        pass


if __name__ == '__main__':
    app = QApplication(sys.argv)
    demo = KViewer()
    demo.show()
    sys.exit(app.exec_())
/MainFrame.py
# -*- coding: utf-8 -*- import sys from PyQt5.QtWidgets import * from KViewer_new import KViewer class KViewerMainWindow(QMainWindow): def __init__(self, parent=None): super(KViewerMainWindow, self).__init__(parent) self.resize(800,600) self.setWindowTitle('KViewer') self.tab_widget = QTabWidget() self.tab_num = 0 tab1 = self.new_tab() self.tab_widget.addTab(tab1, 'tab%d'% self.tab_num) self.tab_widget.setTabText(0, 'contract 0') self.main_layout = QVBoxLayout() self.main_layout.addWidget(self.tab_widget) self.setLayout(self.main_layout) self.setCentralWidget(self.tab_widget) tb = self.addToolBar("增删") add = QAction("增加",self) add.triggered.connect(self.add_tab) tb.addAction(add) rem = QAction("删除",self) rem.triggered.connect(self.remove_tab) tb.addAction(rem) #tb.actionTriggered[QAction].connect(self.add_tab) def new_tab(self): ''' tab1 = QWidget() layout = QFormLayout() layout.addRow("name",QLineEdit()) layout.addRow("address",QLineEdit()) tab1.setLayout(layout) return tab1 ''' kv = KViewer() return kv def add_tab(self,): print ('1') tab1 = self.new_tab() self.tab_num += 1 self.tab_widget.addTab(tab1, 'tab %d'% self.tab_num) self.tab_widget.setTabText(self.tab_num, 'contract%d'% self.tab_num) def remove_tab(self): i = self.tab_widget.currentIndex() print ("current index:%d" % i) print ("tab_num:%d" % self.tab_num) self.tab_widget.removeTab(i) self.tab_num -= 1 if __name__ == "__main__": app = QApplication(sys.argv) win = KViewerMainWindow() win.show() sys.exit(app.exec_())
/complex.py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'complex.ui'
#
# Created: Sat Jul 14 20:48:01 2018
#      by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!

from PySide import QtCore, QtGui


class Ui_MainWindow(object):
    """pyside-uic generated UI: a main window with three top-level tabs.

    Tab 1 nests another tab widget (a tree page and a calendar page),
    Tab 2 holds radio buttons, a dial + LCD pair, a font combo box, a
    label and a progress bar, and the third tab is an empty vertical
    layout that client code can fill (e.g. with plot widgets).
    """

    def setupUi(self, MainWindow):
        # Top-level window and central widget.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(803, 600)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtGui.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 801, 551))
        self.tabWidget.setObjectName("tabWidget")
        # --- Tab 1: nested tab widget ---
        self.tab = QtGui.QWidget()
        self.tab.setObjectName("tab")
        self.tabWidget_2 = QtGui.QTabWidget(self.tab)
        self.tabWidget_2.setGeometry(QtCore.QRect(0, 0, 801, 531))
        self.tabWidget_2.setObjectName("tabWidget_2")
        # Inner tab: tree widget with one top-level item and one child.
        self.tab_3 = QtGui.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.treeWidget = QtGui.QTreeWidget(self.tab_3)
        self.treeWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
        self.treeWidget.setObjectName("treeWidget")
        item_0 = QtGui.QTreeWidgetItem(self.treeWidget)
        item_1 = QtGui.QTreeWidgetItem(item_0)
        self.tabWidget_2.addTab(self.tab_3, "")
        # Inner tab: date edit stacked above a calendar.
        self.tab_4 = QtGui.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.verticalLayoutWidget = QtGui.QWidget(self.tab_4)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.dateEdit = QtGui.QDateEdit(self.verticalLayoutWidget)
        self.dateEdit.setObjectName("dateEdit")
        self.verticalLayout.addWidget(self.dateEdit)
        self.calendarWidget = QtGui.QCalendarWidget(self.verticalLayoutWidget)
        self.calendarWidget.setObjectName("calendarWidget")
        self.verticalLayout.addWidget(self.calendarWidget)
        self.tabWidget_2.addTab(self.tab_4, "")
        self.tabWidget.addTab(self.tab, "")
        # --- Tab 2: assorted demo controls ---
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName("tab_2")
        # Radio button group.
        self.groupBox = QtGui.QGroupBox(self.tab_2)
        self.groupBox.setGeometry(QtCore.QRect(20, 10, 73, 92))
        self.groupBox.setObjectName("groupBox")
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.radioButton = QtGui.QRadioButton(self.groupBox)
        self.radioButton.setObjectName("radioButton")
        self.verticalLayout_2.addWidget(self.radioButton)
        self.radioButton_2 = QtGui.QRadioButton(self.groupBox)
        self.radioButton_2.setObjectName("radioButton_2")
        self.verticalLayout_2.addWidget(self.radioButton_2)
        self.radioButton_3 = QtGui.QRadioButton(self.groupBox)
        self.radioButton_3.setObjectName("radioButton_3")
        self.verticalLayout_2.addWidget(self.radioButton_3)
        # Dial + LCD readout group.
        self.groupBox_2 = QtGui.QGroupBox(self.tab_2)
        self.groupBox_2.setGeometry(QtCore.QRect(440, 30, 321, 151))
        self.groupBox_2.setObjectName("groupBox_2")
        self.layoutWidget = QtGui.QWidget(self.groupBox_2)
        self.layoutWidget.setGeometry(QtCore.QRect(60, 30, 172, 102))
        self.layoutWidget.setObjectName("layoutWidget")
        self.horizontalLayout = QtGui.QHBoxLayout(self.layoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.dial = QtGui.QDial(self.layoutWidget)
        self.dial.setObjectName("dial")
        self.horizontalLayout.addWidget(self.dial)
        self.lcdNumber = QtGui.QLCDNumber(self.layoutWidget)
        self.lcdNumber.setObjectName("lcdNumber")
        self.horizontalLayout.addWidget(self.lcdNumber)
        self.fontComboBox = QtGui.QFontComboBox(self.tab_2)
        self.fontComboBox.setGeometry(QtCore.QRect(60, 230, 381, 22))
        self.fontComboBox.setObjectName("fontComboBox")
        self.label = QtGui.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(60, 290, 381, 71))
        self.label.setScaledContents(False)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setWordWrap(False)
        self.label.setObjectName("label")
        self.progressBar = QtGui.QProgressBar(self.tab_2)
        self.progressBar.setGeometry(QtCore.QRect(60, 480, 661, 23))
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.tabWidget.addTab(self.tab_2, "")
        # --- Tab 3: empty layout for client plotting code ---
        self.tab_5 = QtGui.QWidget()
        self.tab_5.setObjectName("tab_5")
        self.verticalLayoutWidget_2 = QtGui.QWidget(self.tab_5)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(-1, -1, 791, 531))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.tabWidget.addTab(self.tab_5, "")
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 803, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(2)
        self.tabWidget_2.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (PySide API: extra encoding arg)."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.treeWidget.headerItem().setText(0, QtGui.QApplication.translate("MainWindow", "第一列", None, QtGui.QApplication.UnicodeUTF8))
        self.treeWidget.headerItem().setText(1, QtGui.QApplication.translate("MainWindow", "New Column", None, QtGui.QApplication.UnicodeUTF8))
        # Disable sorting while item texts are assigned, then restore it.
        __sortingEnabled = self.treeWidget.isSortingEnabled()
        self.treeWidget.setSortingEnabled(False)
        self.treeWidget.topLevelItem(0).setText(0, QtGui.QApplication.translate("MainWindow", "子条目一", None, QtGui.QApplication.UnicodeUTF8))
        self.treeWidget.topLevelItem(0).child(0).setText(0, QtGui.QApplication.translate("MainWindow", "子条目一一", None, QtGui.QApplication.UnicodeUTF8))
        self.treeWidget.setSortingEnabled(__sortingEnabled)
        self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), QtGui.QApplication.translate("MainWindow", "树", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), QtGui.QApplication.translate("MainWindow", "日历", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("MainWindow", "Tab 1", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox.setTitle(QtGui.QApplication.translate("MainWindow", "功能选择", None, QtGui.QApplication.UnicodeUTF8))
        self.radioButton.setText(QtGui.QApplication.translate("MainWindow", "默认", None, QtGui.QApplication.UnicodeUTF8))
        self.radioButton_2.setText(QtGui.QApplication.translate("MainWindow", "重置", None, QtGui.QApplication.UnicodeUTF8))
        self.radioButton_3.setText(QtGui.QApplication.translate("MainWindow", "选项3", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_2.setTitle(QtGui.QApplication.translate("MainWindow", "移动刻度盘", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtGui.QApplication.translate("MainWindow", "Tab 2", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), QtGui.QApplication.translate("MainWindow", "绘图", None, QtGui.QApplication.UnicodeUTF8))
/complex2.py
# -*- coding: utf-8 -*-

# PyQt5 port of the PySide uic output for 'complex.ui'.
#from PySide import QtCore, QtGui
from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    """PyQt5 version of the 'complex.ui' form: three top-level tabs.

    Tab 1 nests another tab widget (tree page / calendar page), Tab 2
    holds radio buttons, a dial + LCD pair, a font combo box, a label
    and a progress bar, and the third tab ("绘图") is an empty vertical
    layout (verticalLayout_3) that client code fills with plot widgets.
    """

    def setupUi(self, MainWindow):
        # Top-level window and central widget.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(803, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 801, 551))
        self.tabWidget.setObjectName("tabWidget")
        # --- Tab 1: nested tab widget ---
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.tabWidget_2 = QtWidgets.QTabWidget(self.tab)
        self.tabWidget_2.setGeometry(QtCore.QRect(0, 0, 801, 531))
        self.tabWidget_2.setObjectName("tabWidget_2")
        # Inner tab: tree widget with one top-level item and one child.
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.treeWidget = QtWidgets.QTreeWidget(self.tab_3)
        self.treeWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
        self.treeWidget.setObjectName("treeWidget")
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        item_1 = QtWidgets.QTreeWidgetItem(item_0)
        self.tabWidget_2.addTab(self.tab_3, "")
        # Inner tab: date edit stacked above a calendar.
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.tab_4)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 791, 501))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.dateEdit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
        self.dateEdit.setObjectName("dateEdit")
        self.verticalLayout.addWidget(self.dateEdit)
        self.calendarWidget = QtWidgets.QCalendarWidget(self.verticalLayoutWidget)
        self.calendarWidget.setObjectName("calendarWidget")
        self.verticalLayout.addWidget(self.calendarWidget)
        self.tabWidget_2.addTab(self.tab_4, "")
        self.tabWidget.addTab(self.tab, "")
        # --- Tab 2: assorted demo controls ---
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        # Radio button group.
        self.groupBox = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox.setGeometry(QtCore.QRect(20, 10, 73, 92))
        self.groupBox.setObjectName("groupBox")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.groupBox)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.radioButton = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton.setObjectName("radioButton")
        self.verticalLayout_2.addWidget(self.radioButton)
        self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton_2.setObjectName("radioButton_2")
        self.verticalLayout_2.addWidget(self.radioButton_2)
        self.radioButton_3 = QtWidgets.QRadioButton(self.groupBox)
        self.radioButton_3.setObjectName("radioButton_3")
        self.verticalLayout_2.addWidget(self.radioButton_3)
        # Dial + LCD readout group.
        self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_2.setGeometry(QtCore.QRect(440, 30, 321, 151))
        self.groupBox_2.setObjectName("groupBox_2")
        self.widget = QtWidgets.QWidget(self.groupBox_2)
        self.widget.setGeometry(QtCore.QRect(60, 30, 172, 102))
        self.widget.setObjectName("widget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.dial = QtWidgets.QDial(self.widget)
        self.dial.setObjectName("dial")
        self.horizontalLayout.addWidget(self.dial)
        self.lcdNumber = QtWidgets.QLCDNumber(self.widget)
        self.lcdNumber.setObjectName("lcdNumber")
        self.horizontalLayout.addWidget(self.lcdNumber)
        self.fontComboBox = QtWidgets.QFontComboBox(self.tab_2)
        self.fontComboBox.setGeometry(QtCore.QRect(60, 230, 381, 22))
        self.fontComboBox.setObjectName("fontComboBox")
        self.label = QtWidgets.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(60, 290, 381, 71))
        self.label.setScaledContents(False)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setWordWrap(False)
        self.label.setObjectName("label")
        self.progressBar = QtWidgets.QProgressBar(self.tab_2)
        self.progressBar.setGeometry(QtCore.QRect(60, 480, 661, 23))
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.tabWidget.addTab(self.tab_2, "")
        # --- Tab 3: empty layout for client plotting code ---
        self.tab_5 = QtWidgets.QWidget()
        self.tab_5.setObjectName("tab_5")
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.tab_5)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(-1, -1, 791, 531))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.tabWidget.addTab(self.tab_5, "")
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 803, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(1)
        self.tabWidget_2.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Assign all user-visible strings (PyQt5 translate API)."""
        MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "MainWindow"))
        self.treeWidget.headerItem().setText(0, QtWidgets.QApplication.translate("MainWindow", u"第一列"))
        self.treeWidget.headerItem().setText(1, QtWidgets.QApplication.translate("MainWindow", "New Column"))
        # Disable sorting while item texts are assigned, then restore it.
        __sortingEnabled = self.treeWidget.isSortingEnabled()
        self.treeWidget.setSortingEnabled(False)
        self.treeWidget.topLevelItem(0).setText(0, QtWidgets.QApplication.translate("MainWindow", u"子条目一"))
        self.treeWidget.topLevelItem(0).child(0).setText(0, QtWidgets.QApplication.translate("MainWindow", u"子条目一一"))
        self.treeWidget.setSortingEnabled(__sortingEnabled)
        self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), QtWidgets.QApplication.translate("MainWindow", u"树"))
        self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), QtWidgets.QApplication.translate("MainWindow", u"日历"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtWidgets.QApplication.translate("MainWindow", "Tab 1"))
        self.groupBox.setTitle(QtWidgets.QApplication.translate("MainWindow", u"功能选择"))
        self.radioButton.setText(QtWidgets.QApplication.translate("MainWindow", u"默认"))
        self.radioButton_2.setText(QtWidgets.QApplication.translate("MainWindow", u"重置"))
        self.radioButton_3.setText(QtWidgets.QApplication.translate("MainWindow", u"选项3"))
        self.groupBox_2.setTitle(QtWidgets.QApplication.translate("MainWindow", u"移动刻度盘"))
        self.label.setText(QtWidgets.QApplication.translate("MainWindow", "TextLabel"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtWidgets.QApplication.translate("MainWindow", "Tab 2"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), QtWidgets.QApplication.translate("MainWindow", "绘图"))
/complexExample.py
# -*- coding: utf-8 -*-
"""Demo application: the complex2 UI wired to live widgets, plus a
pyqtgraph candlestick (K-line) chart linked to an overview plot via a
LinearRegionItem selector."""
import complex2
from PyQt5 import QtCore, QtWidgets, QtGui
import sys
import time
import pyqtgraph as pg
import pandas as pd
import tushare as ts
import datetime
from matplotlib.pylab import date2num


class MainWindow(object):
    """Builds the UI, wires widget signals, and draws the K-line charts.

    NOTE(review): the constructor blocks in app.exec_() and calls
    sys.exit() itself, so instantiating this class never returns.
    """

    def __init__(self):
        app = QtWidgets.QApplication(sys.argv)
        MainWindow = QtWidgets.QMainWindow()  # local name shadows this class
        self.ui = complex2.Ui_MainWindow()
        self.ui.setupUi(MainWindow)
        # Wire the simple demo widgets (calendar -> date edit, dial -> LCD,
        # font combo -> label, radio buttons -> progress bar thread).
        self.update_date()
        self.update_calendar()
        self.set_lcd()
        self.set_dial()
        #self.zero_progress()
        #self.click_radio3()
        self.update_progressbar()
        self.set_font()
        # Prepare the OHLC data once so every consumer can share it.
        # Original tushare download kept for reference:
        #hist_data = ts.get_hist_data('600519', start='2010-05-01', end='2017-11-04')
        #hist_data.to_csv('hist_data.csv')
        hist_data = pd.read_csv('hist_data.csv')
        self.t = range(hist_data.shape[0])
        self.open = hist_data.open.tolist()
        self.high = hist_data.high.tolist()
        self.low = hist_data.low.tolist()
        self.close = hist_data.close.tolist()
        # NOTE(review): under Python 3 zip() returns a one-shot iterator, but
        # CandlestickItem.generatePicture indexes self.data[0]/[1] — wrap in
        # list() if this crashes; confirm which interpreter this targets.
        packdate = zip(self.t, self.open, self.close, self.low, self.high)
        ma5 = hist_data.close.rolling(5).mean().tolist()
        self.plt1 = self.chart(hist_data['date'].tolist(), packdate)
        self.plt2 = self.chart2(self.t, self.close)
        self.plt1.plot(ma5)  # overlay the 5-period moving average
        # Region selector shown on the second (overview) plot; its range
        # drives the X/Y window of the main candlestick plot.
        self.region = pg.LinearRegionItem()
        self.region.setZValue(10)
        self.region.sigRegionChanged.connect(self.update_plt1)
        self.plt1.sigRangeChanged.connect(self.updateRegion)
        self.region.setRegion([0, 100])
        # Add the LinearRegionItem to the ViewBox, but tell the ViewBox to
        # exclude this item when doing auto-range calculations.
        self.plt2.addItem(self.region, ignoreBounds=True)
        self.ui.verticalLayout_3.addWidget(self.plt1)
        self.ui.verticalLayout_3.addWidget(self.plt2)
        MainWindow.show()
        sys.exit(app.exec_())

    def update_date(self):
        """Copy the calendar's selected date into the date edit."""
        self.ui.dateEdit.setDate(self.ui.calendarWidget.selectedDate())

    def update_calendar(self):
        """Keep the date edit in sync with calendar selection changes."""
        self.ui.calendarWidget.selectionChanged.connect(self.update_date)

    def set_lcd(self):
        """Show the dial's current value on the LCD."""
        self.ui.lcdNumber.display(self.ui.dial.value())

    def set_dial(self):
        """Update the LCD whenever the dial moves."""
        self.ui.dial.valueChanged['int'].connect(self.set_lcd)

    # Radio button 2 resets the progress bar (unused alternative wiring).
    def zero_progress(self):
        self.ui.radioButton_2.clicked.connect(self.ui.progressBar.reset)

    def update_progress(self):
        """Copy the LCD value into the progress bar (unused alternative)."""
        value = self.ui.lcdNumber.value()
        self.ui.progressBar.setValue(value)

    def click_radio3(self):
        self.ui.radioButton_3.clicked.connect(self.update_progress)

    def set_font(self):
        """Echo the chosen font family name into the label."""
        # NOTE(review): the 'QString' signal overload is PyQt5-version
        # dependent — confirm against the installed PyQt5.
        self.ui.fontComboBox.activated['QString'].connect(self.ui.label.setText)

    def progressBar_counter(self, start_value=0):
        """Start a background counter thread that feeds the progress bar."""
        self.run_thread = RunThread(parent=None, counter_start=start_value)
        self.run_thread.start()
        self.run_thread.counter_value.connect(self.set_progressbar)

    def set_progressbar(self, counter):
        # Ignore late signals arriving after the user pressed stop.
        if not self.stop_progress:
            self.ui.progressBar.setValue(counter)

    # Progress bar driven by a worker thread:
    # RunThread keeps counting and emits an int-valued signal.
    # start_progressbar first reads the bar's current value and resumes
    # counting from it, so the UI appears to continue where it stopped.
    # In reality the previous RunThread has ended; each restart creates a
    # brand-new thread.
    def update_progressbar(self):
        self.ui.radioButton.clicked.connect(self.start_progressbar)
        self.ui.radioButton_2.clicked.connect(self.stop_progressbar)
        self.ui.radioButton_3.clicked.connect(self.reset_progressbar)
        self.progress_value = 0
        self.stop_progress = False

    def start_progressbar(self):
        self.stop_progress = False
        self.progress_value = self.ui.progressBar.value()
        self.progressBar_counter(self.progress_value)

    def stop_progressbar(self):
        self.stop_progress = True
        try:
            self.run_thread.stop()
        except:
            # No thread has been started yet; nothing to stop.
            pass

    def reset_progressbar(self):
        self.progress_value = 0
        self.ui.progressBar.reset()
        #self.stop_progress = False
        self.stop_progressbar()

    def chart(self, date_list, data_list):
        """Build the candlestick PlotWidget.

        date_list: date strings used as X-axis tick labels.
        data_list: iterable of (index, open, close, low, high) rows.
        """
        item = CandlestickItem(data_list)
        axis = DateAxis(date_strings=date_list, orientation='bottom')
        plt = pg.PlotWidget(axisItems={'bottom': axis})
        plt.addItem(item, )
        plt.showGrid(x=True, y=True)
        return plt

    def chart2(self, x, y):
        """Build the overview close-price line plot."""
        plt = pg.PlotWidget()
        plt.addLegend()  # show the legend
        plt.plot(x=x, y=y, pen="w", name='close')
        return plt

    def update_plt1(self):
        """Window the main plot to the region selection (auto-fit Y)."""
        self.region.setZValue(10)
        minX, maxX = self.region.getRegion()
        # Auto-fit the Y axis to the low/high range inside the selection.
        int_minY = max(0, int(minX))
        int_maxY = max(1, int(maxX))
        minY = min(self.low[int_minY:int_maxY]) - 5
        maxY = max(self.high[int_minY:int_maxY]) + 5
        self.plt1.setYRange(minY, maxY)
        self.plt1.setXRange(minX, maxX, padding=0)

    def updateRegion(self, window, viewRange):
        """Mirror manual panning/zooming of plt1 back into the region item."""
        rgn = viewRange[0]
        self.region.setRegion(rgn)


class RunThread(QtCore.QThread):
    """Worker thread that counts up to 100, emitting each value ~10x/sec."""

    # Signal carrying the current counter value as an int.
    counter_value = QtCore.pyqtSignal(int)

    def __init__(self, parent=None, counter_start=0):
        super(RunThread, self).__init__(parent)
        self.counter = counter_start
        self.is_running = True

    def run(self):
        # Count until 100 or until stop() clears the flag.
        while self.counter < 100 and self.is_running == True:
            time.sleep(0.1)
            self.counter += 1
            print(self.counter)
            self.counter_value.emit(self.counter)  # publish the new value

    def stop(self):
        """Ask the loop to exit, then force-terminate the thread."""
        self.is_running = False
        print("线程停止中...")
        # NOTE(review): terminate() is abrupt; the is_running flag alone
        # would let run() exit cleanly within ~0.1s.
        self.terminate()


class DateAxis(pg.AxisItem):
    """Axis that maps integer tick positions to pre-supplied date strings."""

    def __init__(self, date_strings, orientation):
        pg.AxisItem.__init__(self, orientation)
        self.date_strings = date_strings
        self.len = len(self.date_strings)

    def tickStrings(self, values, scale, spacing):
        """Return the date string for each in-range tick, '' otherwise."""
        strns = []
        for x in values:
            x1 = int(x)
            if 0 <= x1 < self.len:
                strns.append(self.date_strings[x1])
            else:
                strns.append('')
        return strns


## Create a subclass of GraphicsObject.
## The only required methods are paint() and boundingRect()
## (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
    """Candlestick renderer; rows are (time, open, close, min, max)."""

    def __init__(self, data):
        pg.GraphicsObject.__init__(self)
        self.data = data  ## data must have fields: time, open, close, min, max
        self.generatePicture()

    def generatePicture(self):
        ## pre-computing a QPicture object allows paint() to run much more quickly,
        ## rather than re-drawing the shapes every time.
        self.picture = QtGui.QPicture()
        p = QtGui.QPainter(self.picture)
        p.setPen(pg.mkPen('w'))
        # Candle body width: a third of the spacing between samples.
        w = (self.data[1][0] - self.data[0][0]) / 3.
        # NOTE(review): the loop names shadow the open/min/max builtins.
        for (t, open, close, min, max) in self.data:
            p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
            if open > close:
                p.setBrush(pg.mkBrush('r'))  # close below open: red body
            else:
                p.setBrush(pg.mkBrush('g'))  # close at/above open: green body
            p.drawRect(QtCore.QRectF(t - w, open, w * 2, close - open))
        p.end()

    def paint(self, p, *args):
        p.drawPicture(0, 0, self.picture)

    def boundingRect(self):
        ## boundingRect _must_ indicate the entire area that will be drawn on
        ## or else we will get artifacts and possibly crashing.
        ## (in this case, QPicture does all the work of computing the bouning rect for us)
        return QtCore.QRectF(self.picture.boundingRect())


if __name__ == '__main__':
    MainWindow()
/decouple_window.py
# -*- coding: utf-8 -*-
"""Decoupling demo: populate a generated UI (nullWindow) from outside it."""
import nullWindow
from PyQt5 import QtCore, QtWidgets, QtGui

if __name__ == '__main__':
    import sys

    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = nullWindow.Ui_MainWindow()
    ui.setupUi(main_window)
    # Place one sample cell on each diagonal position of the table.
    for row, cell_text in enumerate((u'数据1', u'数据2', u'数据3')):
        ui.tableWidget.setItem(row, row, QtWidgets.QTableWidgetItem(cell_text))
    main_window.show()
    sys.exit(app.exec_())
/indexer.py
# -*- coding: utf-8 -*-
"""Indicator classes that manage an indicator's parameters, data and plots.

Each indicator owns:

1. Parameters
   - ``para_name``: list of parameter names.
   - ``para_dic``: parameter name -> parameter value.
   - ``para_widgets_dic``: parameter name -> line-edit widget supplying it.
2. Data
   - ``data_dic``: parameter name -> computed data series.
3. Plotting
   - ``plt``: the main plot widget.
   - ``plt_dic``: parameter name -> per-parameter curve item.
"""


class IndexerBase(object):
    """Common state and helpers for an indicator drawn on a plot."""

    # Pen colours assigned to parameters in declaration order.
    color_list = ['w', 'y', 'c', 'r', 'g']

    def __init__(self, plt, ):
        self.is_avtived = True  # (sic) whether the indicator is active/drawn
        self.plt = plt
        self.para_name = []
        self.para_dic = {}
        self.para_widgets_dic = {}
        self.data_dic = {}
        self.plt_dic = {}

    def draw(self):
        """Create the plot items; overridden by subclasses."""
        pass

    def reflesh(self):  # (sic) name kept for backward compatibility
        """Push current data into existing plot items; overridden."""
        pass

    def set_data(self):
        """Recompute data_dic from para_dic; overridden by subclasses."""
        pass

    def set_all_para(self):
        """Read every parameter widget into para_dic, then recompute data.

        Empty or non-numeric widgets fall back to 0.
        """
        for name, widget in self.para_widgets_dic.items():
            value = self.set_para(widget)
            self.para_dic[name] = value if value else 0
        self.set_data()

    def set_para(self, lindEdit_widgets):
        """Return the widget's text as an int, or None when empty/invalid."""
        t = lindEdit_widgets.text()
        if t:
            try:
                return int(t)
            except (TypeError, ValueError):
                # Was a bare except; narrowed to the conversion errors.
                print(u"请检查输入内容,只接受数字")
        return None

    def get_indexer_value_text(self, pos):
        """Return an HTML snippet listing each parameter's value at *pos*."""
        t = ""
        for i, pname in enumerate(self.para_name):
            c = self.color_list[i]
            t += "<span style='color: %s'>%s=%0.3f </span>" % (c, pname, self.data_dic[pname][pos])
        return t


class Indexer_MA(IndexerBase):
    """Moving-average indicator: up to five MAs (N1..N5) of the close price."""

    def __init__(self, plt, rawdata, para_widgets_list):
        # BUG FIX: the original called super(IndexerBase, self).__init__(),
        # which skipped IndexerBase.__init__ entirely and then re-initialised
        # every attribute by hand; initialise the base class properly instead.
        super(Indexer_MA, self).__init__(plt)
        self.para_name = ['N1', 'N2', 'N3', 'N4', 'N5']
        # Raw close-price series the moving averages are computed from.
        self.series_close = rawdata['close']
        # Pair each parameter name with its input widget positionally.
        for name, pwidget in zip(self.para_name, para_widgets_list):
            self.para_widgets_dic[name] = pwidget
        # Reads the widgets into para_dic and computes data_dic
        # (set_all_para already calls set_data; the original called it twice).
        self.set_all_para()

    def draw(self):
        """Create one curve per configured parameter on the main plot."""
        if self.is_avtived:
            for i, pname in enumerate(self.para_name):
                if pname in self.para_dic:
                    self.plt_dic[pname] = self.plt.plot(name=pname, pen=self.color_list[i])
                    self.plt_dic[pname].setData(self.data_dic[pname])

    def reflesh(self):
        """Refresh the existing curves with the current data."""
        for k, d in self.data_dic.items():
            self.plt_dic[k].setData(d)

    def set_data(self, ):
        """Rolling mean of the close series for each parameter's window."""
        for k, d in self.para_dic.items():
            self.data_dic[k] = self.series_close.rolling(d).mean()
/kviewer1.py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'kviewer.ui' # # Created by: PyQt5 UI code generator 5.6 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(800, 600) self.centralwidget = QtWidgets.QWidget(MainWindow) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth()) self.centralwidget.setSizePolicy(sizePolicy) self.centralwidget.setObjectName("centralwidget") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout_2.setObjectName("verticalLayout_2") self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setObjectName("tabWidget") self.tab_para = QtWidgets.QWidget() self.tab_para.setObjectName("tab_para") self.groupBox_3 = QtWidgets.QGroupBox(self.tab_para) self.groupBox_3.setGeometry(QtCore.QRect(400, 110, 361, 121)) self.groupBox_3.setObjectName("groupBox_3") self.label = QtWidgets.QLabel(self.groupBox_3) self.label.setGeometry(QtCore.QRect(30, 60, 41, 16)) self.label.setObjectName("label") self.lineEdit_macd_short = QtWidgets.QLineEdit(self.groupBox_3) self.lineEdit_macd_short.setEnabled(False) self.lineEdit_macd_short.setGeometry(QtCore.QRect(80, 60, 41, 20)) self.lineEdit_macd_short.setObjectName("lineEdit_macd_short") self.label_2 = QtWidgets.QLabel(self.groupBox_3) self.label_2.setGeometry(QtCore.QRect(150, 60, 31, 16)) self.label_2.setObjectName("label_2") self.lineEdit_macd_long = QtWidgets.QLineEdit(self.groupBox_3) self.lineEdit_macd_long.setEnabled(False) self.lineEdit_macd_long.setGeometry(QtCore.QRect(190, 60, 41, 20)) self.lineEdit_macd_long.setObjectName("lineEdit_macd_long") self.label_3 = 
QtWidgets.QLabel(self.groupBox_3) self.label_3.setGeometry(QtCore.QRect(260, 60, 21, 16)) self.label_3.setObjectName("label_3") self.lineEdit_macd_m = QtWidgets.QLineEdit(self.groupBox_3) self.lineEdit_macd_m.setEnabled(False) self.lineEdit_macd_m.setGeometry(QtCore.QRect(280, 60, 41, 20)) self.lineEdit_macd_m.setObjectName("lineEdit_macd_m") self.checkBox_macd = QtWidgets.QCheckBox(self.groupBox_3) self.checkBox_macd.setGeometry(QtCore.QRect(30, 30, 71, 16)) self.checkBox_macd.setObjectName("checkBox_macd") self.groupBox_2 = QtWidgets.QGroupBox(self.tab_para) self.groupBox_2.setGeometry(QtCore.QRect(20, 110, 371, 121)) self.groupBox_2.setObjectName("groupBox_2") self.gridLayoutWidget = QtWidgets.QWidget(self.groupBox_2) self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 40, 351, 80)) self.gridLayoutWidget.setObjectName("gridLayoutWidget") self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget) self.gridLayout_2.setContentsMargins(0, 0, 0, 0) self.gridLayout_2.setObjectName("gridLayout_2") self.label_5 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_5.setObjectName("label_5") self.gridLayout_2.addWidget(self.label_5, 0, 0, 1, 1) self.label_7 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_7.setObjectName("label_7") self.gridLayout_2.addWidget(self.label_7, 0, 4, 1, 1) self.lineEdit_ma_n3 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n3.setEnabled(False) self.lineEdit_ma_n3.setObjectName("lineEdit_ma_n3") self.gridLayout_2.addWidget(self.lineEdit_ma_n3, 0, 5, 1, 1) self.lineEdit_ma_n2 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n2.setEnabled(False) self.lineEdit_ma_n2.setObjectName("lineEdit_ma_n2") self.gridLayout_2.addWidget(self.lineEdit_ma_n2, 0, 3, 1, 1) self.label_6 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_6.setObjectName("label_6") self.gridLayout_2.addWidget(self.label_6, 0, 2, 1, 1) self.lineEdit_ma_n1 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n1.setEnabled(False) 
self.lineEdit_ma_n1.setObjectName("lineEdit_ma_n1") self.gridLayout_2.addWidget(self.lineEdit_ma_n1, 0, 1, 1, 1) self.label_8 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_8.setObjectName("label_8") self.gridLayout_2.addWidget(self.label_8, 1, 0, 1, 1) self.lineEdit_ma_n4 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n4.setEnabled(False) self.lineEdit_ma_n4.setObjectName("lineEdit_ma_n4") self.gridLayout_2.addWidget(self.lineEdit_ma_n4, 1, 1, 1, 1) self.label_9 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_9.setObjectName("label_9") self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1) self.lineEdit_ma_n5 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n5.setEnabled(False) self.lineEdit_ma_n5.setObjectName("lineEdit_ma_n5") self.gridLayout_2.addWidget(self.lineEdit_ma_n5, 1, 3, 1, 1) self.label_10 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_10.setObjectName("label_10") self.gridLayout_2.addWidget(self.label_10, 1, 4, 1, 1) self.comboBox_ma = QtWidgets.QComboBox(self.gridLayoutWidget) self.comboBox_ma.setEnabled(False) self.comboBox_ma.setObjectName("comboBox_ma") self.comboBox_ma.addItem("") self.comboBox_ma.addItem("") self.gridLayout_2.addWidget(self.comboBox_ma, 1, 5, 1, 1) self.checkBox_ma = QtWidgets.QCheckBox(self.groupBox_2) self.checkBox_ma.setGeometry(QtCore.QRect(20, 20, 71, 16)) self.checkBox_ma.setChecked(False) self.checkBox_ma.setObjectName("checkBox_ma") self.groupBox_5 = QtWidgets.QGroupBox(self.tab_para) self.groupBox_5.setGeometry(QtCore.QRect(400, 10, 361, 91)) self.groupBox_5.setObjectName("groupBox_5") self.pushButton_opr_file = QtWidgets.QPushButton(self.groupBox_5) self.pushButton_opr_file.setGeometry(QtCore.QRect(30, 40, 75, 23)) self.pushButton_opr_file.setObjectName("pushButton_opr_file") self.label_opr = QtWidgets.QLabel(self.groupBox_5) self.label_opr.setGeometry(QtCore.QRect(130, 40, 54, 12)) self.label_opr.setObjectName("label_opr") self.groupBox_4 = 
QtWidgets.QGroupBox(self.tab_para) self.groupBox_4.setGeometry(QtCore.QRect(20, 10, 371, 90)) self.groupBox_4.setObjectName("groupBox_4") self.gridLayoutWidget_2 = QtWidgets.QWidget(self.groupBox_4) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(9, 20, 351, 61)) self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName("gridLayout_3") self.lineEdit_contract = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit_contract.setObjectName("lineEdit_contract") self.gridLayout_3.addWidget(self.lineEdit_contract, 0, 2, 1, 1) self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_4.setObjectName("label_4") self.gridLayout_3.addWidget(self.label_4, 0, 3, 1, 1) self.label_12 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_12.setObjectName("label_12") self.gridLayout_3.addWidget(self.label_12, 0, 0, 1, 1) self.comboBox_bar = QtWidgets.QComboBox(self.gridLayoutWidget_2) self.comboBox_bar.setObjectName("comboBox_bar") self.comboBox_bar.addItem("") self.comboBox_bar.addItem("") self.comboBox_bar.addItem("") self.comboBox_bar.addItem("") self.comboBox_bar.addItem("") self.comboBox_bar.addItem("") self.comboBox_bar.addItem("") self.gridLayout_3.addWidget(self.comboBox_bar, 0, 4, 1, 1) self.label_13 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_13.setObjectName("label_13") self.gridLayout_3.addWidget(self.label_13, 1, 0, 1, 1) self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_14.setObjectName("label_14") self.gridLayout_3.addWidget(self.label_14, 1, 3, 1, 1) self.dateEdit_end = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dateEdit_end.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 30), QtCore.QTime(0, 0, 0))) self.dateEdit_end.setObjectName("dateEdit_end") 
self.gridLayout_3.addWidget(self.dateEdit_end, 1, 4, 1, 1) self.dateEdit_start = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dateEdit_start.setObjectName("dateEdit_start") self.gridLayout_3.addWidget(self.dateEdit_start, 1, 2, 1, 1) self.pushButton_set_para = QtWidgets.QPushButton(self.tab_para) self.pushButton_set_para.setGeometry(QtCore.QRect(360, 360, 75, 23)) self.pushButton_set_para.setObjectName("pushButton_set_para") self.groupBox_6 = QtWidgets.QGroupBox(self.tab_para) self.groupBox_6.setGeometry(QtCore.QRect(20, 240, 371, 80)) self.groupBox_6.setObjectName("groupBox_6") self.lineEdit_kdj_n = QtWidgets.QLineEdit(self.groupBox_6) self.lineEdit_kdj_n.setEnabled(False) self.lineEdit_kdj_n.setGeometry(QtCore.QRect(40, 50, 51, 20)) self.lineEdit_kdj_n.setObjectName("lineEdit_kdj_n") self.lineEdit_kdj_m1 = QtWidgets.QLineEdit(self.groupBox_6) self.lineEdit_kdj_m1.setEnabled(False) self.lineEdit_kdj_m1.setGeometry(QtCore.QRect(160, 50, 51, 20)) self.lineEdit_kdj_m1.setObjectName("lineEdit_kdj_m1") self.lineEdit_kdj_m2 = QtWidgets.QLineEdit(self.groupBox_6) self.lineEdit_kdj_m2.setEnabled(False) self.lineEdit_kdj_m2.setGeometry(QtCore.QRect(270, 50, 51, 20)) self.lineEdit_kdj_m2.setObjectName("lineEdit_kdj_m2") self.label_11 = QtWidgets.QLabel(self.groupBox_6) self.label_11.setGeometry(QtCore.QRect(20, 50, 21, 16)) self.label_11.setObjectName("label_11") self.label_15 = QtWidgets.QLabel(self.groupBox_6) self.label_15.setGeometry(QtCore.QRect(140, 50, 21, 16)) self.label_15.setObjectName("label_15") self.label_16 = QtWidgets.QLabel(self.groupBox_6) self.label_16.setGeometry(QtCore.QRect(250, 50, 21, 16)) self.label_16.setObjectName("label_16") self.checkBox_kdj = QtWidgets.QCheckBox(self.groupBox_6) self.checkBox_kdj.setGeometry(QtCore.QRect(20, 20, 71, 16)) self.checkBox_kdj.setObjectName("checkBox_kdj") self.groupBox_7 = QtWidgets.QGroupBox(self.tab_para) self.groupBox_7.setGeometry(QtCore.QRect(400, 240, 361, 81)) self.groupBox_7.setObjectName("groupBox_7") 
self.lineEdit_dmi_n = QtWidgets.QLineEdit(self.groupBox_7) self.lineEdit_dmi_n.setEnabled(False) self.lineEdit_dmi_n.setGeometry(QtCore.QRect(70, 50, 41, 20)) self.lineEdit_dmi_n.setObjectName("lineEdit_dmi_n") self.lineEdit_dmi_m = QtWidgets.QLineEdit(self.groupBox_7) self.lineEdit_dmi_m.setEnabled(False) self.lineEdit_dmi_m.setGeometry(QtCore.QRect(190, 50, 41, 20)) self.lineEdit_dmi_m.setObjectName("lineEdit_dmi_m") self.label_17 = QtWidgets.QLabel(self.groupBox_7) self.label_17.setGeometry(QtCore.QRect(40, 50, 31, 16)) self.label_17.setObjectName("label_17") self.label_18 = QtWidgets.QLabel(self.groupBox_7) self.label_18.setGeometry(QtCore.QRect(170, 50, 21, 16)) self.label_18.setObjectName("label_18") self.checkBox_dmi = QtWidgets.QCheckBox(self.groupBox_7) self.checkBox_dmi.setGeometry(QtCore.QRect(30, 30, 71, 16)) self.checkBox_dmi.setObjectName("checkBox_dmi") self.tabWidget.addTab(self.tab_para, "") self.tab_plot = QtWidgets.QWidget() self.tab_plot.setObjectName("tab_plot") self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab_plot) self.verticalLayout_3.setContentsMargins(0, 0, 0, 0) self.verticalLayout_3.setObjectName("verticalLayout_3") self.verticalLayout = QtWidgets.QVBoxLayout() self.verticalLayout.setContentsMargins(-1, -1, -1, 0) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout_plot_field = QtWidgets.QHBoxLayout() self.horizontalLayout_plot_field.setObjectName("horizontalLayout_plot_field") self.label_para = QtWidgets.QLabel(self.tab_plot) self.label_para.setFrameShape(QtWidgets.QFrame.Box) self.label_para.setObjectName("label_para") self.horizontalLayout_plot_field.addWidget(self.label_para) self.label_point = QtWidgets.QLabel(self.tab_plot) self.label_point.setFrameShape(QtWidgets.QFrame.Box) self.label_point.setObjectName("label_point") self.horizontalLayout_plot_field.addWidget(self.label_point) self.label_file = QtWidgets.QLabel(self.tab_plot) self.label_file.setFrameShape(QtWidgets.QFrame.Box) 
self.label_file.setObjectName("label_file") self.horizontalLayout_plot_field.addWidget(self.label_file) self.verticalLayout.addLayout(self.horizontalLayout_plot_field) self.verticalLayout_3.addLayout(self.verticalLayout) self.tabWidget.addTab(self.tab_plot, "") self.verticalLayout_2.addWidget(self.tabWidget) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23)) self.menubar.setObjectName("menubar") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) self.tabWidget.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.groupBox_3.setTitle(_translate("MainWindow", "MACD参数")) self.label.setText(_translate("MainWindow", "Short")) self.lineEdit_macd_short.setText(_translate("MainWindow", "12")) self.label_2.setText(_translate("MainWindow", "Long")) self.lineEdit_macd_long.setText(_translate("MainWindow", "26")) self.label_3.setText(_translate("MainWindow", "M")) self.lineEdit_macd_m.setText(_translate("MainWindow", "9")) self.checkBox_macd.setText(_translate("MainWindow", "MACD")) self.groupBox_2.setTitle(_translate("MainWindow", "MA参数")) self.label_5.setText(_translate("MainWindow", "N1")) self.label_7.setText(_translate("MainWindow", "N3")) self.lineEdit_ma_n3.setText(_translate("MainWindow", "20")) self.lineEdit_ma_n2.setText(_translate("MainWindow", "10")) self.label_6.setText(_translate("MainWindow", "N2")) self.lineEdit_ma_n1.setText(_translate("MainWindow", "5")) self.label_8.setText(_translate("MainWindow", "N4")) self.lineEdit_ma_n4.setText(_translate("MainWindow", "30")) self.label_9.setText(_translate("MainWindow", "N5")) 
self.lineEdit_ma_n5.setText(_translate("MainWindow", "50")) self.label_10.setText(_translate("MainWindow", "算法")) self.comboBox_ma.setItemText(0, _translate("MainWindow", "MA")) self.comboBox_ma.setItemText(1, _translate("MainWindow", "EMA")) self.checkBox_ma.setText(_translate("MainWindow", "MA")) self.groupBox_5.setTitle(_translate("MainWindow", "回测文件")) self.pushButton_opr_file.setText(_translate("MainWindow", "打开")) self.label_opr.setText(_translate("MainWindow", "TextLabel")) self.groupBox_4.setTitle(_translate("MainWindow", "公共参数")) self.lineEdit_contract.setText(_translate("MainWindow", "RB1810")) self.label_4.setText(_translate("MainWindow", "周期")) self.label_12.setText(_translate("MainWindow", "合约")) self.comboBox_bar.setItemText(0, _translate("MainWindow", "3600")) self.comboBox_bar.setItemText(1, _translate("MainWindow", "1800")) self.comboBox_bar.setItemText(2, _translate("MainWindow", "900")) self.comboBox_bar.setItemText(3, _translate("MainWindow", "600")) self.comboBox_bar.setItemText(4, _translate("MainWindow", "300")) self.comboBox_bar.setItemText(5, _translate("MainWindow", "60")) self.comboBox_bar.setItemText(6, _translate("MainWindow", "0")) self.label_13.setText(_translate("MainWindow", "开始时间")) self.label_14.setText(_translate("MainWindow", "结束时间")) self.pushButton_set_para.setText(_translate("MainWindow", "画图")) self.groupBox_6.setTitle(_translate("MainWindow", "KDJ参数")) self.lineEdit_kdj_n.setText(_translate("MainWindow", "9")) self.lineEdit_kdj_m1.setText(_translate("MainWindow", "3")) self.lineEdit_kdj_m2.setText(_translate("MainWindow", "3")) self.label_11.setText(_translate("MainWindow", "N")) self.label_15.setText(_translate("MainWindow", "M1")) self.label_16.setText(_translate("MainWindow", "M2")) self.checkBox_kdj.setText(_translate("MainWindow", "KDJ")) self.groupBox_7.setTitle(_translate("MainWindow", "DMI参数")) self.lineEdit_dmi_n.setText(_translate("MainWindow", "14")) self.lineEdit_dmi_m.setText(_translate("MainWindow", "6")) 
self.label_17.setText(_translate("MainWindow", "N")) self.label_18.setText(_translate("MainWindow", "M")) self.checkBox_dmi.setText(_translate("MainWindow", "DMI")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_para), _translate("MainWindow", "参数设置")) self.label_para.setText(_translate("MainWindow", "TextLabel")) self.label_point.setText(_translate("MainWindow", "TextLabel")) self.label_file.setText(_translate("MainWindow", "TextLabel")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_plot), _translate("MainWindow", "行情"))
/kviewer2.py
# -*- coding: utf-8 -*-

# UI definition for the k-line viewer main window (originally generated by
# pyuic5, hand-formatted).  Layout: an indicator-switch group box with two
# action buttons on top, and a tab widget below holding a chart tab
# ("行情") and a parameter-settings tab ("参数设置").

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    """Builds the widget tree for the viewer's QMainWindow."""

    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)

        # Central widget grows with the window in both directions.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                           QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setObjectName("centralwidget")

        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")

        # Indicator on/off check boxes (two of them still unused placeholders).
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setObjectName("groupBox")
        self.checkBox_ma = QtWidgets.QCheckBox(self.groupBox)
        self.checkBox_ma.setGeometry(QtCore.QRect(40, 10, 71, 16))
        self.checkBox_ma.setObjectName("checkBox_ma")
        self.checkBox_dmi = QtWidgets.QCheckBox(self.groupBox)
        self.checkBox_dmi.setGeometry(QtCore.QRect(40, 30, 71, 16))
        self.checkBox_dmi.setObjectName("checkBox_dmi")
        self.checkBox_macd = QtWidgets.QCheckBox(self.groupBox)
        self.checkBox_macd.setGeometry(QtCore.QRect(140, 10, 71, 16))
        self.checkBox_macd.setObjectName("checkBox_macd")
        self.checkBox_kdj = QtWidgets.QCheckBox(self.groupBox)
        self.checkBox_kdj.setGeometry(QtCore.QRect(250, 10, 71, 16))
        self.checkBox_kdj.setObjectName("checkBox_kdj")
        self.checkBox_5 = QtWidgets.QCheckBox(self.groupBox)
        self.checkBox_5.setGeometry(QtCore.QRect(140, 30, 71, 16))
        self.checkBox_5.setObjectName("checkBox_5")
        self.checkBox_6 = QtWidgets.QCheckBox(self.groupBox)
        self.checkBox_6.setGeometry(QtCore.QRect(250, 30, 71, 16))
        self.checkBox_6.setObjectName("checkBox_6")
        self.gridLayout.addWidget(self.groupBox, 0, 0, 2, 1)

        # Action buttons to the right of the group box.
        self.pushButton_draw = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_draw.setObjectName("pushButton_draw")
        self.gridLayout.addWidget(self.pushButton_draw, 1, 1, 1, 1)
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setObjectName("pushButton")
        self.gridLayout.addWidget(self.pushButton, 0, 1, 1, 1)
        self.horizontalLayout.addLayout(self.gridLayout)
        self.verticalLayout_2.addLayout(self.horizontalLayout)

        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName("tabWidget")

        # --- chart tab: a row of framed status labels above the plot area ---
        self.tab_plot = QtWidgets.QWidget()
        self.tab_plot.setObjectName("tab_plot")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tab_plot)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_plot_field = QtWidgets.QHBoxLayout()
        self.horizontalLayout_plot_field.setObjectName("horizontalLayout_plot_field")
        self.label_para = QtWidgets.QLabel(self.tab_plot)
        self.label_para.setFrameShape(QtWidgets.QFrame.Box)
        self.label_para.setObjectName("label_para")
        self.horizontalLayout_plot_field.addWidget(self.label_para)
        self.label_point = QtWidgets.QLabel(self.tab_plot)
        self.label_point.setFrameShape(QtWidgets.QFrame.Box)
        self.label_point.setObjectName("label_point")
        self.horizontalLayout_plot_field.addWidget(self.label_point)
        self.label_file = QtWidgets.QLabel(self.tab_plot)
        self.label_file.setFrameShape(QtWidgets.QFrame.Box)
        self.label_file.setObjectName("label_file")
        self.horizontalLayout_plot_field.addWidget(self.label_file)
        self.verticalLayout.addLayout(self.horizontalLayout_plot_field)
        self.verticalLayout_3.addLayout(self.verticalLayout)
        self.tabWidget.addTab(self.tab_plot, "")

        # --- parameter tab ---
        self.tab_para = QtWidgets.QWidget()
        self.tab_para.setObjectName("tab_para")

        # MACD parameters.
        self.groupBox_3 = QtWidgets.QGroupBox(self.tab_para)
        self.groupBox_3.setGeometry(QtCore.QRect(400, 110, 361, 91))
        self.groupBox_3.setObjectName("groupBox_3")
        self.label = QtWidgets.QLabel(self.groupBox_3)
        self.label.setGeometry(QtCore.QRect(30, 40, 41, 16))
        self.label.setObjectName("label")
        self.lineEdit_macd_short = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_macd_short.setGeometry(QtCore.QRect(80, 40, 41, 20))
        self.lineEdit_macd_short.setObjectName("lineEdit_macd_short")
        self.label_2 = QtWidgets.QLabel(self.groupBox_3)
        self.label_2.setGeometry(QtCore.QRect(150, 40, 31, 16))
        self.label_2.setObjectName("label_2")
        self.lineEdit_macd_long = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_macd_long.setGeometry(QtCore.QRect(190, 40, 41, 20))
        self.lineEdit_macd_long.setObjectName("lineEdit_macd_long")
        self.label_3 = QtWidgets.QLabel(self.groupBox_3)
        self.label_3.setGeometry(QtCore.QRect(260, 40, 21, 16))
        self.label_3.setObjectName("label_3")
        self.lineEdit_macd_m = QtWidgets.QLineEdit(self.groupBox_3)
        self.lineEdit_macd_m.setGeometry(QtCore.QRect(280, 40, 41, 20))
        self.lineEdit_macd_m.setObjectName("lineEdit_macd_m")

        # MA parameters: five period fields plus an algorithm selector.
        self.groupBox_2 = QtWidgets.QGroupBox(self.tab_para)
        self.groupBox_2.setGeometry(QtCore.QRect(20, 110, 371, 90))
        self.groupBox_2.setObjectName("groupBox_2")
        self.gridLayoutWidget = QtWidgets.QWidget(self.groupBox_2)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 351, 80))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label_5 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_5.setObjectName("label_5")
        self.gridLayout_2.addWidget(self.label_5, 0, 0, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_7.setObjectName("label_7")
        self.gridLayout_2.addWidget(self.label_7, 0, 4, 1, 1)
        self.lineEdit_ma_n3 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_ma_n3.setObjectName("lineEdit_ma_n3")
        self.gridLayout_2.addWidget(self.lineEdit_ma_n3, 0, 5, 1, 1)
        self.lineEdit_ma_n2 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_ma_n2.setObjectName("lineEdit_ma_n2")
        self.gridLayout_2.addWidget(self.lineEdit_ma_n2, 0, 3, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_6.setObjectName("label_6")
        self.gridLayout_2.addWidget(self.label_6, 0, 2, 1, 1)
        self.lineEdit_ma_n1 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_ma_n1.setObjectName("lineEdit_ma_n1")
        self.gridLayout_2.addWidget(self.lineEdit_ma_n1, 0, 1, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_8.setObjectName("label_8")
        self.gridLayout_2.addWidget(self.label_8, 1, 0, 1, 1)
        self.lineEdit_ma_n4 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_ma_n4.setObjectName("lineEdit_ma_n4")
        self.gridLayout_2.addWidget(self.lineEdit_ma_n4, 1, 1, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_9.setObjectName("label_9")
        self.gridLayout_2.addWidget(self.label_9, 1, 2, 1, 1)
        self.lineEdit_ma_n5 = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_ma_n5.setObjectName("lineEdit_ma_n5")
        self.gridLayout_2.addWidget(self.lineEdit_ma_n5, 1, 3, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_10.setObjectName("label_10")
        self.gridLayout_2.addWidget(self.label_10, 1, 4, 1, 1)
        self.comboBox_ma = QtWidgets.QComboBox(self.gridLayoutWidget)
        self.comboBox_ma.setObjectName("comboBox_ma")
        for _ in range(2):  # MA / EMA entries; text set in retranslateUi
            self.comboBox_ma.addItem("")
        self.gridLayout_2.addWidget(self.comboBox_ma, 1, 5, 1, 1)

        # Back-test file chooser.
        self.groupBox_5 = QtWidgets.QGroupBox(self.tab_para)
        self.groupBox_5.setGeometry(QtCore.QRect(400, 10, 361, 91))
        self.groupBox_5.setObjectName("groupBox_5")
        self.pushButton_opr_file = QtWidgets.QPushButton(self.groupBox_5)
        self.pushButton_opr_file.setGeometry(QtCore.QRect(30, 40, 75, 23))
        self.pushButton_opr_file.setObjectName("pushButton_opr_file")
        self.label_opr = QtWidgets.QLabel(self.groupBox_5)
        self.label_opr.setGeometry(QtCore.QRect(130, 40, 54, 12))
        self.label_opr.setObjectName("label_opr")

        # Common parameters: contract, bar period and date range.
        self.groupBox_4 = QtWidgets.QGroupBox(self.tab_para)
        self.groupBox_4.setGeometry(QtCore.QRect(20, 10, 371, 90))
        self.groupBox_4.setObjectName("groupBox_4")
        self.gridLayoutWidget_2 = QtWidgets.QWidget(self.groupBox_4)
        self.gridLayoutWidget_2.setGeometry(QtCore.QRect(9, 9, 351, 61))
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
        self.gridLayout_3.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.lineEdit_contract = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
        self.lineEdit_contract.setObjectName("lineEdit_contract")
        self.gridLayout_3.addWidget(self.lineEdit_contract, 0, 2, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_4.setObjectName("label_4")
        self.gridLayout_3.addWidget(self.label_4, 0, 3, 1, 1)
        self.label_12 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_12.setObjectName("label_12")
        self.gridLayout_3.addWidget(self.label_12, 0, 0, 1, 1)
        self.comboBox_bar = QtWidgets.QComboBox(self.gridLayoutWidget_2)
        self.comboBox_bar.setObjectName("comboBox_bar")
        for _ in range(7):  # bar-period entries; text set in retranslateUi
            self.comboBox_bar.addItem("")
        self.gridLayout_3.addWidget(self.comboBox_bar, 0, 4, 1, 1)
        self.label_13 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_13.setObjectName("label_13")
        self.gridLayout_3.addWidget(self.label_13, 1, 0, 1, 1)
        self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget_2)
        self.label_14.setObjectName("label_14")
        self.gridLayout_3.addWidget(self.label_14, 1, 3, 1, 1)
        self.dateEdit_end = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
        self.dateEdit_end.setDateTime(QtCore.QDateTime(QtCore.QDate(2018, 6, 30),
                                                       QtCore.QTime(0, 0, 0)))
        self.dateEdit_end.setObjectName("dateEdit_end")
        self.gridLayout_3.addWidget(self.dateEdit_end, 1, 4, 1, 1)
        self.dateEdit_start = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
        self.dateEdit_start.setObjectName("dateEdit_start")
        self.gridLayout_3.addWidget(self.dateEdit_start, 1, 2, 1, 1)

        self.pushButton_set_para = QtWidgets.QPushButton(self.tab_para)
        self.pushButton_set_para.setGeometry(QtCore.QRect(360, 330, 75, 23))
        self.pushButton_set_para.setObjectName("pushButton_set_para")

        # KDJ parameters.
        self.groupBox_6 = QtWidgets.QGroupBox(self.tab_para)
        self.groupBox_6.setGeometry(QtCore.QRect(20, 210, 371, 80))
        self.groupBox_6.setObjectName("groupBox_6")
        self.lineEdit_kdj_n = QtWidgets.QLineEdit(self.groupBox_6)
        self.lineEdit_kdj_n.setGeometry(QtCore.QRect(40, 30, 51, 20))
        self.lineEdit_kdj_n.setObjectName("lineEdit_kdj_n")
        self.lineEdit_kdj_m1 = QtWidgets.QLineEdit(self.groupBox_6)
        self.lineEdit_kdj_m1.setGeometry(QtCore.QRect(160, 30, 51, 20))
        self.lineEdit_kdj_m1.setObjectName("lineEdit_kdj_m1")
        self.lineEdit_kdj_m2 = QtWidgets.QLineEdit(self.groupBox_6)
        self.lineEdit_kdj_m2.setGeometry(QtCore.QRect(270, 30, 51, 20))
        self.lineEdit_kdj_m2.setObjectName("lineEdit_kdj_m2")
        self.label_11 = QtWidgets.QLabel(self.groupBox_6)
        self.label_11.setGeometry(QtCore.QRect(20, 30, 21, 16))
        self.label_11.setObjectName("label_11")
        self.label_15 = QtWidgets.QLabel(self.groupBox_6)
        self.label_15.setGeometry(QtCore.QRect(140, 30, 21, 16))
        self.label_15.setObjectName("label_15")
        self.label_16 = QtWidgets.QLabel(self.groupBox_6)
        self.label_16.setGeometry(QtCore.QRect(250, 30, 21, 16))
        self.label_16.setObjectName("label_16")

        # DMI parameters.
        self.groupBox_7 = QtWidgets.QGroupBox(self.tab_para)
        self.groupBox_7.setGeometry(QtCore.QRect(400, 210, 361, 81))
        self.groupBox_7.setObjectName("groupBox_7")
        self.lineEdit_dmi_n = QtWidgets.QLineEdit(self.groupBox_7)
        self.lineEdit_dmi_n.setGeometry(QtCore.QRect(70, 30, 41, 20))
        self.lineEdit_dmi_n.setObjectName("lineEdit_dmi_n")
        self.lineEdit_dmi_m = QtWidgets.QLineEdit(self.groupBox_7)
        self.lineEdit_dmi_m.setGeometry(QtCore.QRect(190, 30, 41, 20))
        self.lineEdit_dmi_m.setObjectName("lineEdit_dmi_m")
        self.label_17 = QtWidgets.QLabel(self.groupBox_7)
        self.label_17.setGeometry(QtCore.QRect(40, 30, 31, 16))
        self.label_17.setObjectName("label_17")
        self.label_18 = QtWidgets.QLabel(self.groupBox_7)
        self.label_18.setGeometry(QtCore.QRect(170, 30, 21, 16))
        self.label_18.setObjectName("label_18")
        self.tabWidget.addTab(self.tab_para, "")

        self.verticalLayout_2.addWidget(self.tabWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Sets every user-visible string (single place for translation)."""
        # QCoreApplication.translate is the same static method QApplication
        # inherits, so behaviour is unchanged.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.groupBox.setTitle(_translate("MainWindow", "指标开关"))
        self.checkBox_ma.setText(_translate("MainWindow", "MA"))
        self.checkBox_dmi.setText(_translate("MainWindow", "DMI"))
        self.checkBox_macd.setText(_translate("MainWindow", "MACD"))
        self.checkBox_kdj.setText(_translate("MainWindow", "KDJ"))
        self.checkBox_5.setText(_translate("MainWindow", "CheckBox"))
        self.checkBox_6.setText(_translate("MainWindow", "CheckBox"))
        self.pushButton_draw.setText(_translate("MainWindow", "绘图"))
        self.pushButton.setText(_translate("MainWindow", "设置参数"))
        self.label_para.setText(_translate("MainWindow", "TextLabel"))
        self.label_point.setText(_translate("MainWindow", "TextLabel"))
        self.label_file.setText(_translate("MainWindow", "TextLabel"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_plot),
                                  _translate("MainWindow", "行情"))
        self.groupBox_3.setTitle(_translate("MainWindow", "MACD参数"))
        self.label.setText(_translate("MainWindow", "Short"))
        self.lineEdit_macd_short.setText(_translate("MainWindow", "12"))
        self.label_2.setText(_translate("MainWindow", "Long"))
        self.lineEdit_macd_long.setText(_translate("MainWindow", "26"))
        self.label_3.setText(_translate("MainWindow", "M"))
        self.lineEdit_macd_m.setText(_translate("MainWindow", "9"))
        self.groupBox_2.setTitle(_translate("MainWindow", "MA参数"))
        self.label_5.setText(_translate("MainWindow", "N1"))
        self.label_7.setText(_translate("MainWindow", "N3"))
        self.lineEdit_ma_n3.setText(_translate("MainWindow", "20"))
        self.lineEdit_ma_n2.setText(_translate("MainWindow", "10"))
        self.label_6.setText(_translate("MainWindow", "N2"))
        self.lineEdit_ma_n1.setText(_translate("MainWindow", "5"))
        self.label_8.setText(_translate("MainWindow", "N4"))
        self.lineEdit_ma_n4.setText(_translate("MainWindow", "30"))
        self.label_9.setText(_translate("MainWindow", "N5"))
        self.lineEdit_ma_n5.setText(_translate("MainWindow", "50"))
        self.label_10.setText(_translate("MainWindow", "算法"))
        self.comboBox_ma.setItemText(0, _translate("MainWindow", "MA"))
        self.comboBox_ma.setItemText(1, _translate("MainWindow", "EMA"))
        self.groupBox_5.setTitle(_translate("MainWindow", "回测文件"))
        self.pushButton_opr_file.setText(_translate("MainWindow", "PushButton"))
        self.label_opr.setText(_translate("MainWindow", "TextLabel"))
        self.groupBox_4.setTitle(_translate("MainWindow", "公共参数"))
        self.label_4.setText(_translate("MainWindow", "周期"))
        self.label_12.setText(_translate("MainWindow", "合约"))
        for idx, period in enumerate(("0", "60", "300", "600", "900", "1800", "3600")):
            self.comboBox_bar.setItemText(idx, _translate("MainWindow", period))
        self.label_13.setText(_translate("MainWindow", "开始时间"))
        self.label_14.setText(_translate("MainWindow", "结束时间"))
        self.pushButton_set_para.setText(_translate("MainWindow", "设置"))
        self.groupBox_6.setTitle(_translate("MainWindow", "KDJ参数"))
        self.lineEdit_kdj_n.setText(_translate("MainWindow", "9"))
        self.lineEdit_kdj_m1.setText(_translate("MainWindow", "3"))
        self.lineEdit_kdj_m2.setText(_translate("MainWindow", "3"))
        self.label_11.setText(_translate("MainWindow", "N"))
        self.label_15.setText(_translate("MainWindow", "M1"))
        self.label_16.setText(_translate("MainWindow", "M2"))
        self.groupBox_7.setTitle(_translate("MainWindow", "DMI参数"))
        self.lineEdit_dmi_n.setText(_translate("MainWindow", "14"))
        self.lineEdit_dmi_m.setText(_translate("MainWindow", "6"))
        self.label_17.setText(_translate("MainWindow", "N"))
        self.label_18.setText(_translate("MainWindow", "M"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_para),
                                  _translate("MainWindow", "参数设置"))
/kviewer_app.py
# -*- coding: utf-8 -*-
"""K-line viewer application.

Loads 1-minute bar data from an Excel file, draws a candlestick chart with
a linked overview plot (close price + range selector) and shows the OHLC /
indicator values for the bar under the mouse cursor.
"""
import kviewer2
from indexer import Indexer_MA
import parameter2
from PyQt5 import QtCore, QtWidgets, QtGui
import sys
import time
import pyqtgraph as pg
import pandas as pd
import numpy as np
#import tushare as ts
import datetime
from matplotlib.pylab import date2num
#import DATA_CONSTANTS as DC

# pyqtgraph single-letter pen colours:
#   b=blue g=green r=red c=cyan m=magenta y=yellow
#   k=black w=white d=dark grey l=light grey s=slate
color_list = ['w', 'y', 'c', 'r', 'g']


class MainWindow(object):
    """Builds the Qt main window, wires the plots together and runs the app.

    NOTE: the constructor blocks in the Qt event loop and calls sys.exit()
    when the window is closed (original behaviour, kept for callers).
    """

    def __init__(self):
        app = QtWidgets.QApplication(sys.argv)
        MainWindow = QtWidgets.QMainWindow()
        self.ui = kviewer2.Ui_MainWindow()
        self.ui.setupUi(MainWindow)
        self.ma_para = []
        self.ma_data = []
        self.ma_plot_dic = {}

        # Load the bar data.  Columns used: strtime, open, high, low, close.
        hist_data = pd.read_excel('RB1810_2018-06-19_1m.xlsx')
        self.t = range(hist_data.shape[0])
        self.date_list = hist_data['strtime'].tolist()
        self.open = hist_data.open.tolist()
        self.high = hist_data.high.tolist()
        self.low = hist_data.low.tolist()
        self.close = hist_data.close.tolist()
        self.prepare_indexer_para()

        # BUG FIX: on Python 3 zip() is a one-shot, non-indexable iterator,
        # but CandlestickItem indexes and iterates its data — materialise it.
        packdate = list(zip(self.t, self.open, self.close, self.low, self.high))
        self.plt1 = self.chart(self.date_list, packdate)
        self.plt2 = self.chart2(self.t, self.close)
        self.plt1.addLegend()

        self.ma_indexer = Indexer_MA(self.plt1, hist_data,
                                     [self.ui.lineEdit_ma_n1,
                                      self.ui.lineEdit_ma_n2,
                                      self.ui.lineEdit_ma_n3,
                                      self.ui.lineEdit_ma_n4,
                                      self.ui.lineEdit_ma_n5])
        self.ma_indexer.draw()

        # Vertical cursor line on the main chart.
        self.vLine = pg.InfiniteLine(angle=90, movable=False)
        self.plt1.addItem(self.vLine, ignoreBounds=True)

        # Range-selection box on the overview chart drives plt1's view.
        self.region = pg.LinearRegionItem()
        self.region.setZValue(10)
        self.region.sigRegionChanged.connect(self.update_plt1)
        self.plt1.sigRangeChanged.connect(self.updateRegion)
        self.region.setRegion([0, 100])
        self.plt2.addItem(self.region, ignoreBounds=True)

        self.ui.verticalLayout.addWidget(self.plt1)
        self.ui.verticalLayout.addWidget(self.plt2)

        # BUG FIX: the proxy must be kept alive on self — as a plain local it
        # was garbage-collected and mouseMoved never fired.
        self.proxy = pg.SignalProxy(self.plt1.scene().sigMouseMoved,
                                    rateLimit=60, slot=self.mouseMoved)
        MainWindow.show()
        sys.exit(app.exec_())

    def prepare_indexer_para(self):
        """Set the default indicator parameters."""
        self.ma_para = [5, 10, 20, 30, 50]
        self.macd_para = {'short': 5, 'long': 10, 'M': 9}
        self.kdj_para = {'N': 9, 'M1': 3, 'M2': 3}
        self.dmi_para = {'N': 14, 'M': 6}

    def prepare_indexer_data(self):
        """(Re)compute each MA series and push it to its plot curve."""
        for period in self.ma_para:
            data_name = 'ma%d' % period
            data = pd.Series(self.close).rolling(period).mean()
            self.ma_plot_dic[data_name].setData(data)

    def set_ma_para(self):
        """Demo hook: bump the first MA period by 3 and redraw the 'ma5' curve."""
        self.ma_para[0] += 3
        data = pd.Series(self.close).rolling(self.ma_para[0]).mean()
        self.ma_plot_dic['ma5'].setData(data)

    def chart(self, date_list, data_list):
        """Create the candlestick chart with a date-string x axis."""
        item = CandlestickItem(data_list)
        axis = DateAxis(date_strings=date_list, orientation='bottom')
        # BUG FIX: the custom axis must be handed to the constructor —
        # assigning plt.axisItems after construction has no effect in pyqtgraph.
        plt = pg.PlotWidget(axisItems={'bottom': axis})
        plt.addItem(item)
        plt.showGrid(x=True, y=True)
        return plt

    def chart2(self, x, y):
        """Create the overview line chart of the close price."""
        plt = pg.PlotWidget()
        plt.addLegend()  # legend must exist before the named plot
        plt.plot(x=x, y=y, pen="w", name='close')
        return plt

    def update_plt1(self):
        """Follow the overview selection: set plt1's x range, autoscale y."""
        self.region.setZValue(10)
        minX, maxX = self.region.getRegion()
        lo = max(0, int(minX))
        hi = max(1, int(maxX))
        window_low = self.low[lo:hi]
        window_high = self.high[lo:hi]
        # Guard: the selection can lie entirely outside the data range, in
        # which case min()/max() of an empty slice would raise.
        if window_low and window_high:
            self.plt1.setYRange(min(window_low) - 5, max(window_high) + 5)
        self.plt1.setXRange(minX, maxX, padding=0)

    def updateRegion(self, window, viewRange):
        """Keep the overview selection in sync when plt1 is panned/zoomed."""
        self.region.setRegion(viewRange[0])

    def mouseMoved(self, event):
        """Show OHLC and indicator values for the bar under the cursor."""
        pos = event[0]  # SignalProxy wraps the original arguments in a tuple
        if not self.plt1.sceneBoundingRect().contains(pos):
            return
        rect = self.plt1.boundingRect().getRect()
        minx, maxx = self.region.getRegion()
        knum = maxx - minx
        # (pos.x()-35) is the cursor offset from the left frame;
        # (rect[2]-35)/knum is the pixel width of one bar; their quotient
        # plus minx is the bar's index in the full data list.
        # NOTE(review): the 35-pixel frame offset is hard-coded — verify it
        # matches the actual axis width.
        index = int((pos.x() - 35) / ((rect[2] - 35) / knum) + minx)
        if 0 < index < len(self.t):
            bar_open = self.open[index]
            bar_close = self.close[index]
            if bar_open > bar_close:
                c = 'green'
            elif bar_open < bar_close:
                c = 'red'
            else:
                c = 'black'
            self.ui.label_point.setText(
                """
                <span style='color: %s'>open=%0.1f,high=%0.1f,low=%0.1f,close=%0.1f</span>,%s
                """ % (c, self.open[index], self.high[index],
                       self.low[index], self.close[index], self.date_list[index]))
            self.ui.label_para.setText(self.ma_indexer.get_indexer_value_text(index))
            self.vLine.setPos(index)

    def set_parameter(self):
        """Placeholder: read the indicator parameters from the settings tab."""
        pass


class DateAxis(pg.AxisItem):
    """Axis that maps integer bar indices to their date strings."""

    def __init__(self, date_strings, orientation):
        pg.AxisItem.__init__(self, orientation)
        self.date_strings = date_strings   # one label per bar index
        self.len = len(self.date_strings)

    def tickStrings(self, values, scale, spacing):
        """Return the date label for each tick value, '' when out of range."""
        labels = []
        for v in values:
            i = int(v)
            labels.append(self.date_strings[i] if 0 <= i < self.len else '')
        return labels


class CandlestickItem(pg.GraphicsObject):
    """Candlestick renderer; pre-paints into a QPicture for fast redraws.

    ``data`` must be an iterable of (t, open, close, low, high) tuples with
    at least two entries (bar width is derived from the first spacing).
    """

    def __init__(self, data):
        pg.GraphicsObject.__init__(self)
        # Materialise so the data can be indexed and iterated repeatedly
        # (callers may pass a generator/zip object).
        self.data = list(data)
        self.generatePicture()

    def generatePicture(self):
        # Pre-computing a QPicture lets paint() simply replay it instead of
        # re-drawing every bar on each repaint.
        self.picture = QtGui.QPicture()
        p = QtGui.QPainter(self.picture)
        p.setPen(pg.mkPen('w'))
        w = (self.data[1][0] - self.data[0][0]) / 3.
        for (t, o, c, lo, hi) in self.data:
            p.drawLine(QtCore.QPointF(t, lo), QtCore.QPointF(t, hi))
            # Chinese market convention: green for down bars, red for up/flat.
            p.setBrush(pg.mkBrush('g') if o > c else pg.mkBrush('r'))
            p.drawRect(QtCore.QRectF(t - w, o, w * 2, c - o))
        p.end()

    def paint(self, p, *args):
        p.drawPicture(0, 0, self.picture)

    def boundingRect(self):
        # Must cover the entire painted area or pyqtgraph will leave
        # artifacts / may crash; QPicture computes it for us.
        return QtCore.QRectF(self.picture.boundingRect())


if __name__ == '__main__':
    MainWindow()
/nullWindow.py
# -*- coding: utf-8 -*- from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(641, 405) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName("verticalLayout") self.tableWidget = QtWidgets.QTableWidget(self.centralwidget) self.tableWidget.setRowCount(4) self.tableWidget.setColumnCount(6) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setColumnCount(6) self.tableWidget.setRowCount(4) item = QtWidgets.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) self.verticalLayout.addWidget(self.tableWidget) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton = QtWidgets.QPushButton(self.centralwidget) self.pushButton.setObjectName("pushButton") self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 641, 23)) self.menubar.setObjectName("menubar") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) 
QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "MainWindow")) self.tableWidget.verticalHeaderItem(0).setText(QtWidgets.QApplication.translate("MainWindow", "1st row")) self.tableWidget.verticalHeaderItem(1).setText(QtWidgets.QApplication.translate("MainWindow", "2nd row")) self.tableWidget.horizontalHeaderItem(0).setText(QtWidgets.QApplication.translate("MainWindow", "1st col")) self.tableWidget.horizontalHeaderItem(1).setText(QtWidgets.QApplication.translate("MainWindow", "2nd col")) self.tableWidget.horizontalHeaderItem(2).setText(QtWidgets.QApplication.translate("MainWindow", "3rd col")) self.pushButton_2.setText(QtWidgets.QApplication.translate("MainWindow", "PushButton")) self.pushButton.setText(QtWidgets.QApplication.translate("MainWindow", "PushButton"))
/parameter.py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'parameter.ui' # # Created: Sat Jul 21 16:49:46 2018 # by: pyside-uic 0.2.15 running on PySide 1.2.4 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") Form.resize(398, 494) self.verticalLayout = QtGui.QVBoxLayout(Form) self.verticalLayout.setObjectName("verticalLayout") self.groupBox = QtGui.QGroupBox(Form) self.groupBox.setObjectName("groupBox") self.gridLayoutWidget_2 = QtGui.QWidget(self.groupBox) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(9, 9, 351, 61)) self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2") self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_2.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint) self.gridLayout_2.setContentsMargins(0, 0, 0, 0) self.gridLayout_2.setObjectName("gridLayout_2") self.lineEdit = QtGui.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName("lineEdit") self.gridLayout_2.addWidget(self.lineEdit, 0, 2, 1, 1) self.label_2 = QtGui.QLabel(self.gridLayoutWidget_2) self.label_2.setObjectName("label_2") self.gridLayout_2.addWidget(self.label_2, 0, 3, 1, 1) self.label = QtGui.QLabel(self.gridLayoutWidget_2) self.label.setObjectName("label") self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1) self.comboBox = QtGui.QComboBox(self.gridLayoutWidget_2) self.comboBox.setObjectName("comboBox") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.gridLayout_2.addWidget(self.comboBox, 0, 4, 1, 1) self.label_3 = QtGui.QLabel(self.gridLayoutWidget_2) self.label_3.setObjectName("label_3") self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1) self.label_4 = QtGui.QLabel(self.gridLayoutWidget_2) self.label_4.setObjectName("label_4") self.gridLayout_2.addWidget(self.label_4, 
1, 3, 1, 1) self.dateEdit_2 = QtGui.QDateEdit(self.gridLayoutWidget_2) self.dateEdit_2.setObjectName("dateEdit_2") self.gridLayout_2.addWidget(self.dateEdit_2, 1, 4, 1, 1) self.dateEdit = QtGui.QDateEdit(self.gridLayoutWidget_2) self.dateEdit.setObjectName("dateEdit") self.gridLayout_2.addWidget(self.dateEdit, 1, 2, 1, 1) self.verticalLayout.addWidget(self.groupBox) self.groupBox_5 = QtGui.QGroupBox(Form) self.groupBox_5.setObjectName("groupBox_5") self.pushButton_3 = QtGui.QPushButton(self.groupBox_5) self.pushButton_3.setGeometry(QtCore.QRect(20, 20, 75, 23)) self.pushButton_3.setObjectName("pushButton_3") self.label_11 = QtGui.QLabel(self.groupBox_5) self.label_11.setGeometry(QtCore.QRect(110, 30, 54, 12)) self.label_11.setObjectName("label_11") self.verticalLayout.addWidget(self.groupBox_5) self.groupBox_2 = QtGui.QGroupBox(Form) self.groupBox_2.setObjectName("groupBox_2") self.gridLayoutWidget = QtGui.QWidget(self.groupBox_2) self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 351, 80)) self.gridLayoutWidget.setObjectName("gridLayoutWidget") self.gridLayout = QtGui.QGridLayout(self.gridLayoutWidget) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setObjectName("gridLayout") self.label_5 = QtGui.QLabel(self.gridLayoutWidget) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 0, 0, 1, 1) self.label_7 = QtGui.QLabel(self.gridLayoutWidget) self.label_7.setObjectName("label_7") self.gridLayout.addWidget(self.label_7, 0, 4, 1, 1) self.lineEdit_ma_n3 = QtGui.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n3.setObjectName("lineEdit_ma_n3") self.gridLayout.addWidget(self.lineEdit_ma_n3, 0, 5, 1, 1) self.lineEdit_ma_n2 = QtGui.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n2.setObjectName("lineEdit_ma_n2") self.gridLayout.addWidget(self.lineEdit_ma_n2, 0, 3, 1, 1) self.label_6 = QtGui.QLabel(self.gridLayoutWidget) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 2, 1, 1) 
self.lineEdit_ma_n1 = QtGui.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n1.setObjectName("lineEdit_ma_n1") self.gridLayout.addWidget(self.lineEdit_ma_n1, 0, 1, 1, 1) self.label_8 = QtGui.QLabel(self.gridLayoutWidget) self.label_8.setObjectName("label_8") self.gridLayout.addWidget(self.label_8, 1, 0, 1, 1) self.lineEdit_ma_n4 = QtGui.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n4.setObjectName("lineEdit_ma_n4") self.gridLayout.addWidget(self.lineEdit_ma_n4, 1, 1, 1, 1) self.label_9 = QtGui.QLabel(self.gridLayoutWidget) self.label_9.setObjectName("label_9") self.gridLayout.addWidget(self.label_9, 1, 2, 1, 1) self.lineEdit_ma_n5 = QtGui.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n5.setObjectName("lineEdit_ma_n5") self.gridLayout.addWidget(self.lineEdit_ma_n5, 1, 3, 1, 1) self.label_10 = QtGui.QLabel(self.gridLayoutWidget) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 1, 4, 1, 1) self.comboBox_ma = QtGui.QComboBox(self.gridLayoutWidget) self.comboBox_ma.setObjectName("comboBox_ma") self.comboBox_ma.addItem("") self.comboBox_ma.addItem("") self.gridLayout.addWidget(self.comboBox_ma, 1, 5, 1, 1) self.verticalLayout.addWidget(self.groupBox_2) self.groupBox_3 = QtGui.QGroupBox(Form) self.groupBox_3.setObjectName("groupBox_3") self.verticalLayout.addWidget(self.groupBox_3) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton = QtGui.QPushButton(Form) self.pushButton.setLayoutDirection(QtCore.Qt.LeftToRight) self.pushButton.setObjectName("pushButton") self.horizontalLayout.addWidget(self.pushButton) self.pushButton_2 = QtGui.QPushButton(Form) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.verticalLayout.addLayout(self.horizontalLayout) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, 
QtGui.QApplication.UnicodeUTF8)) self.groupBox.setTitle(QtGui.QApplication.translate("Form", "公共参数", None, QtGui.QApplication.UnicodeUTF8)) self.label_2.setText(QtGui.QApplication.translate("Form", "周期", None, QtGui.QApplication.UnicodeUTF8)) self.label.setText(QtGui.QApplication.translate("Form", "合约", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(0, QtGui.QApplication.translate("Form", "0", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(1, QtGui.QApplication.translate("Form", "60", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(2, QtGui.QApplication.translate("Form", "300", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(3, QtGui.QApplication.translate("Form", "600", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(4, QtGui.QApplication.translate("Form", "900", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(5, QtGui.QApplication.translate("Form", "1800", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox.setItemText(6, QtGui.QApplication.translate("Form", "3600", None, QtGui.QApplication.UnicodeUTF8)) self.label_3.setText(QtGui.QApplication.translate("Form", "开始时间", None, QtGui.QApplication.UnicodeUTF8)) self.label_4.setText(QtGui.QApplication.translate("Form", "结束时间", None, QtGui.QApplication.UnicodeUTF8)) self.groupBox_5.setTitle(QtGui.QApplication.translate("Form", "回测文件", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_3.setText(QtGui.QApplication.translate("Form", "PushButton", None, QtGui.QApplication.UnicodeUTF8)) self.label_11.setText(QtGui.QApplication.translate("Form", "TextLabel", None, QtGui.QApplication.UnicodeUTF8)) self.groupBox_2.setTitle(QtGui.QApplication.translate("Form", "MA参数", None, QtGui.QApplication.UnicodeUTF8)) self.label_5.setText(QtGui.QApplication.translate("Form", "N1", None, QtGui.QApplication.UnicodeUTF8)) self.label_7.setText(QtGui.QApplication.translate("Form", "N3", None, QtGui.QApplication.UnicodeUTF8)) 
self.label_6.setText(QtGui.QApplication.translate("Form", "N2", None, QtGui.QApplication.UnicodeUTF8)) self.label_8.setText(QtGui.QApplication.translate("Form", "N4", None, QtGui.QApplication.UnicodeUTF8)) self.label_9.setText(QtGui.QApplication.translate("Form", "N5", None, QtGui.QApplication.UnicodeUTF8)) self.label_10.setText(QtGui.QApplication.translate("Form", "算法", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox_ma.setItemText(0, QtGui.QApplication.translate("Form", "MA", None, QtGui.QApplication.UnicodeUTF8)) self.comboBox_ma.setItemText(1, QtGui.QApplication.translate("Form", "EMA", None, QtGui.QApplication.UnicodeUTF8)) self.groupBox_3.setTitle(QtGui.QApplication.translate("Form", "MACD参数", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton.setText(QtGui.QApplication.translate("Form", "确定", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_2.setText(QtGui.QApplication.translate("Form", "取消", None, QtGui.QApplication.UnicodeUTF8))
/parameter2.py
# -*- coding: utf-8 -*- from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Form(object): def setupUi(self, Form): Form.setObjectName("Form") Form.resize(398, 494) self.verticalLayout = QtWidgets.QVBoxLayout(Form) self.verticalLayout.setObjectName("verticalLayout") self.groupBox = QtWidgets.QGroupBox(Form) self.groupBox.setObjectName("groupBox") self.gridLayoutWidget_2 = QtWidgets.QWidget(self.groupBox) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(9, 9, 351, 61)) self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2") self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_2.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint) self.gridLayout_2.setContentsMargins(0, 0, 0, 0) self.gridLayout_2.setObjectName("gridLayout_2") self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.lineEdit.setObjectName("lineEdit") self.gridLayout_2.addWidget(self.lineEdit, 0, 2, 1, 1) self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_2.setObjectName("label_2") self.gridLayout_2.addWidget(self.label_2, 0, 3, 1, 1) self.label = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label.setObjectName("label") self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1) self.comboBox = QtWidgets.QComboBox(self.gridLayoutWidget_2) self.comboBox.setObjectName("comboBox") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.comboBox.addItem("") self.gridLayout_2.addWidget(self.comboBox, 0, 4, 1, 1) self.label_3 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_3.setObjectName("label_3") self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1) self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_4.setObjectName("label_4") self.gridLayout_2.addWidget(self.label_4, 1, 3, 1, 1) self.dateEdit_2 = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dateEdit_2.setObjectName("dateEdit_2") 
self.gridLayout_2.addWidget(self.dateEdit_2, 1, 4, 1, 1) self.dateEdit = QtWidgets.QDateEdit(self.gridLayoutWidget_2) self.dateEdit.setObjectName("dateEdit") self.gridLayout_2.addWidget(self.dateEdit, 1, 2, 1, 1) self.verticalLayout.addWidget(self.groupBox) self.groupBox_5 = QtWidgets.QGroupBox(Form) self.groupBox_5.setObjectName("groupBox_5") self.pushButton_3 = QtWidgets.QPushButton(self.groupBox_5) self.pushButton_3.setGeometry(QtCore.QRect(20, 20, 75, 23)) self.pushButton_3.setObjectName("pushButton_3") self.label_11 = QtWidgets.QLabel(self.groupBox_5) self.label_11.setGeometry(QtCore.QRect(110, 30, 54, 12)) self.label_11.setObjectName("label_11") self.verticalLayout.addWidget(self.groupBox_5) self.groupBox_2 = QtWidgets.QGroupBox(Form) self.groupBox_2.setObjectName("groupBox_2") self.gridLayoutWidget = QtWidgets.QWidget(self.groupBox_2) self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 351, 80)) self.gridLayoutWidget.setObjectName("gridLayoutWidget") self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setObjectName("gridLayout") self.label_5 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_5.setObjectName("label_5") self.gridLayout.addWidget(self.label_5, 0, 0, 1, 1) self.label_7 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_7.setObjectName("label_7") self.gridLayout.addWidget(self.label_7, 0, 4, 1, 1) self.lineEdit_ma_n3 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n3.setObjectName("lineEdit_ma_n3") self.gridLayout.addWidget(self.lineEdit_ma_n3, 0, 5, 1, 1) self.lineEdit_ma_n2 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n2.setObjectName("lineEdit_ma_n2") self.gridLayout.addWidget(self.lineEdit_ma_n2, 0, 3, 1, 1) self.label_6 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_6.setObjectName("label_6") self.gridLayout.addWidget(self.label_6, 0, 2, 1, 1) self.lineEdit_ma_n1 = QtWidgets.QLineEdit(self.gridLayoutWidget) 
self.lineEdit_ma_n1.setObjectName("lineEdit_ma_n1") self.gridLayout.addWidget(self.lineEdit_ma_n1, 0, 1, 1, 1) self.label_8 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_8.setObjectName("label_8") self.gridLayout.addWidget(self.label_8, 1, 0, 1, 1) self.lineEdit_ma_n4 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n4.setObjectName("lineEdit_ma_n4") self.gridLayout.addWidget(self.lineEdit_ma_n4, 1, 1, 1, 1) self.label_9 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_9.setObjectName("label_9") self.gridLayout.addWidget(self.label_9, 1, 2, 1, 1) self.lineEdit_ma_n5 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.lineEdit_ma_n5.setObjectName("lineEdit_ma_n5") self.gridLayout.addWidget(self.lineEdit_ma_n5, 1, 3, 1, 1) self.label_10 = QtWidgets.QLabel(self.gridLayoutWidget) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 1, 4, 1, 1) self.comboBox_ma = QtWidgets.QComboBox(self.gridLayoutWidget) self.comboBox_ma.setObjectName("comboBox_ma") self.comboBox_ma.addItem("") self.comboBox_ma.addItem("") self.gridLayout.addWidget(self.comboBox_ma, 1, 5, 1, 1) self.verticalLayout.addWidget(self.groupBox_2) self.groupBox_3 = QtWidgets.QGroupBox(Form) self.groupBox_3.setObjectName("groupBox_3") self.verticalLayout.addWidget(self.groupBox_3) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton = QtWidgets.QPushButton(Form) self.pushButton.setLayoutDirection(QtCore.Qt.LeftToRight) self.pushButton.setObjectName("pushButton") self.horizontalLayout.addWidget(self.pushButton) self.pushButton_2 = QtWidgets.QPushButton(Form) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.verticalLayout.addLayout(self.horizontalLayout) self.retranslateUi(Form) QtCore.QMetaObject.connectSlotsByName(Form) def retranslateUi(self, Form): Form.setWindowTitle(QtWidgets.QApplication.translate("Form", "Form")) 
self.groupBox.setTitle(QtWidgets.QApplication.translate("Form", "公共参数")) self.label_2.setText(QtWidgets.QApplication.translate("Form", "周期")) self.label.setText(QtWidgets.QApplication.translate("Form", "合约")) self.comboBox.setItemText(0, QtWidgets.QApplication.translate("Form", "0")) self.comboBox.setItemText(1, QtWidgets.QApplication.translate("Form", "60")) self.comboBox.setItemText(2, QtWidgets.QApplication.translate("Form", "300")) self.comboBox.setItemText(3, QtWidgets.QApplication.translate("Form", "600")) self.comboBox.setItemText(4, QtWidgets.QApplication.translate("Form", "900")) self.comboBox.setItemText(5, QtWidgets.QApplication.translate("Form", "1800")) self.comboBox.setItemText(6, QtWidgets.QApplication.translate("Form", "3600")) self.label_3.setText(QtWidgets.QApplication.translate("Form", "开始时间")) self.label_4.setText(QtWidgets.QApplication.translate("Form", "结束时间")) self.groupBox_5.setTitle(QtWidgets.QApplication.translate("Form", "回测文件")) self.pushButton_3.setText(QtWidgets.QApplication.translate("Form", "PushButton")) self.label_11.setText(QtWidgets.QApplication.translate("Form", "TextLabel")) self.groupBox_2.setTitle(QtWidgets.QApplication.translate("Form", "MA参数")) self.label_5.setText(QtWidgets.QApplication.translate("Form", "N1")) self.label_7.setText(QtWidgets.QApplication.translate("Form", "N3")) self.label_6.setText(QtWidgets.QApplication.translate("Form", "N2")) self.label_8.setText(QtWidgets.QApplication.translate("Form", "N4")) self.label_9.setText(QtWidgets.QApplication.translate("Form", "N5")) self.label_10.setText(QtWidgets.QApplication.translate("Form", "算法")) self.comboBox_ma.setItemText(0, QtWidgets.QApplication.translate("Form", "MA")) self.comboBox_ma.setItemText(1, QtWidgets.QApplication.translate("Form", "EMA")) self.groupBox_3.setTitle(QtWidgets.QApplication.translate("Form", "MACD参数")) self.pushButton.setText(QtWidgets.QApplication.translate("Form", "确定")) self.pushButton_2.setText(QtWidgets.QApplication.translate("Form", 
"取消"))
/untitled.py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'untitled.ui' # # Created: Wed Jul 11 23:51:04 2018 # by: pyside-uic 0.2.15 running on PySide 1.2.4 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(641, 405) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget) self.verticalLayout.setObjectName("verticalLayout") self.tableWidget = QtGui.QTableWidget(self.centralwidget) self.tableWidget.setRowCount(4) self.tableWidget.setColumnCount(6) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setColumnCount(6) self.tableWidget.setRowCount(4) item = QtGui.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(1, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) item = QtGui.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(2, item) self.verticalLayout.addWidget(self.tableWidget) self.horizontalLayout = QtGui.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton_2 = QtGui.QPushButton(self.centralwidget) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.pushButton = QtGui.QPushButton(self.centralwidget) self.pushButton.setObjectName("pushButton") self.horizontalLayout.addWidget(self.pushButton) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtGui.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 641, 23)) self.menubar.setObjectName("menubar") MainWindow.setMenuBar(self.menubar) self.statusbar = 
QtGui.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.verticalHeaderItem(0).setText(QtGui.QApplication.translate("MainWindow", "1st row", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.verticalHeaderItem(1).setText(QtGui.QApplication.translate("MainWindow", "2nd row", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("MainWindow", "1st col", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("MainWindow", "2nd col", None, QtGui.QApplication.UnicodeUTF8)) self.tableWidget.horizontalHeaderItem(2).setText(QtGui.QApplication.translate("MainWindow", "3rd col", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton_2.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8)) self.pushButton.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
saadjansari/KymoAnalysis
refs/heads/main
{"/src/Features.py": ["/src/node_graph.py"], "/src/Kymograph.py": ["/src/node_graph.py"], "/src/Track.py": ["/src/node_graph.py"], "/src/Strain.py": ["/src/Load.py", "/src/Kymograph.py"]}
└── ├── KymographAnalysis.py └── src ├── Features.py ├── Kymograph.py ├── Load.py ├── ReadFiles.py ├── Strain.py ├── Track.py ├── breakBipolar.py ├── node_graph.py └── smooth_test.py
/KymographAnalysis.py
#!/usr/bin/env python import os, pdb, sys import matplotlib.pyplot as plt import matplotlib import pandas as pd import numpy as np import seaborn as sns import math, random import glob, yaml, copy, shutil from src.Strain import * """ Name: KymographAnalysis.py Description: Parses and combines tracks from multiple kymographs for mass analysis """ class KymographAnalysis: def __init__(self): self.cwd = os.getcwd() # Read config file with open("config.yaml") as f: self.config = yaml.load(f, Loader=yaml.CLoader) self.InitStrains() self.Analyze() # InitStrains {{{ def InitStrains(self): # Initialize strains with track files self.strains = [] # Get filenames for each strain for strain in self.config["strains"]: trackpaths = [] for fpath in strain["path"]: trackpaths += glob.glob(fpath) # Initialize cstrain = Strain(trackpaths, label=strain["type"]) cstrain.color = tuple(np.array(strain["color"]) / 255) self.strains += [cstrain] # Use Kmeans classfication if required if "useBipolarKmeansLabel" in self.config.keys(): if self.config["useBipolarKmeansLabel"]: for strain, strain_c in zip(self.strains, self.config["strains"]): strain.TrimUsingKmeansLabel(kmean_label=strain_c["kmean_label"]) # }}} # Analyze {{{ def Analyze(self): # Initialize graphing directory if "saveName" in self.config.keys(): gdir = os.path.join(self.cwd, self.config["saveName"]) else: gdir = os.path.join(self.cwd, "result") if os.path.exists(gdir): shutil.rmtree(gdir, ignore_errors=True) os.mkdir(gdir) os.chdir(gdir) # Analyze by groups if self.config["analyzeByLength"] is True: for group in self.config["analyzeGroups"]: # CD to directory ggdir = os.path.join(gdir, group["type"]) os.mkdir(ggdir) os.chdir(ggdir) print("Analyzing {0}".format(group["type"])) strains = copy.deepcopy(self.strains) # Get tracks that match this spindle length for strain in strains: strain.GetTracks(spindle_length=group["length"]) # pdb.set_trace() if self.config["analyzeSpindleIntensity"] is True: self.GraphSpindleIntensity( 
strains, lrange=group["length"], gname=group["type"] ) self.Graph(strains, gname=group["type"]) else: if self.config["analyzeSPBAssociatedTracks"] == 1: for strain in self.strains: strain.TossFarTracks(self.config["SPBRegion"]) if self.config["analyzeSPBAssociatedTracks"] == 2: for strain in self.strains: strain.TossCloseTracks(self.config["SPBRegion"]) # Get all tracks for strain in self.strains: strain.GetTracks() if self.config["analyzeSpindleIntensity"] is True: self.GraphSpindleIntensity(self.strains) self.Graph(self.strains) os.chdir(self.cwd) # }}} # Graph {{{ def Graph(self, strains, gname=None): # Graph useful properties plt.rcParams.update({"font.size": 14}) plt.rc("legend", fontsize=12) self.PlotTracksByState(k=1000) self.PlotAllTracks() # self.GraphStrain_EventsPerMinutePerCellViolin() # self.GraphStrain_SwitchFrequencyPerCellViolin() # self.GraphStrain_SwitchFrequencyPerCell() self.GraphStrain_FractionMovement() self.GraphStrain_SwitchFrequency2() self.GraphStrain_EventsPerMinute2() # self.GraphStrain_EventsPerMinutePerCell() self.GraphHistComparison() # self.GraphStrainMedianValues() # self.GraphStrain_EventsPerMinutePerCellRaw() # self.GraphStrain_SwitchFrequencyPerCellRaw() # Scatter Plots graphscatvars = ( ['Run displacement','Intensity','nm','AU','scatter_intensity_runlength.pdf'], # ['Velocity','Intensity',r'$\mu$m/min','AU','scatter_intensity_velocity.pdf'], # ['Lifetime','Intensity','min','AU','scatter_intensity_lifetime.pdf'], # [ # "Run length", # "Velocity", # r"$\mu$" + "m", # "nm/s", # "scatter_velocity_runlength.pdf", # ], # [ # "Run length", # "Lifetime", # r"$\mu$" + "m", # "min", # "scatter_lifetime_runlength.pdf", # ], ['Velocity','Intensity','nm/s','AU','scatter_intensity_velocity.pdf'], # ["Velocity", "Lifetime", "nm/s", "min", "scatter_lifetime_velocity.pdf"], # [ # "Run length", # "Average distance from SPB", # r"$\mu$" + "m", # r"$\mu$m", # "scatter_avgSPBdistance_runlength.pdf", # ], # [ # "Velocity", # "Average distance from 
SPB", # "nm/s", # r"$\mu$m", # "scatter_avgSPBdistance_velocity.pdf", # ], # [ # "Lifetime", # "Average distance from SPB", # "min", # r"$\mu$m", # "scatter_avgSPBdistance_lifetime.pdf", # ], ['Lifetime','Intensity','s','AU','scatter_intensity_lifetime.pdf'], ) for x,y,xunit,yunit,figname in graphscatvars: self.GraphStrainScatter( strains,x,y,xlab=x,ylab=y,xunit=xunit,yunit=yunit,figname=figname ) # self.GraphStrain_EventsPerMinute() # self.GraphStrain_AvgStartEnd() # self.GraphStrain_StateTimes() # self.GraphStrain_SwitchCounts() # self.GraphStrain_StateSwitchMatrix() # }}} # GraphHistComparison {{{ def GraphHistComparison(self): def plot_median_special(ax, xloc, rel_height, col): (ybottom, ytop) = ax.get_ylim() ax.plot( [xloc, xloc], [ybottom, rel_height * ytop], color=col, linewidth=1.5, alpha=0.3, solid_capstyle="round", ) ax.plot( [xloc], [rel_height * ytop], marker="d", color=col, alpha=0.6, markersize=6, ) return ax graphhistvars = ( [ "GetRunLengths", "Run displacement", "Count", "nm", "strain_runlength.pdf", ], ["GetVelocities", "Velocity", "Count", "nm/s", "strain_velocity.pdf"], ["GetLifetimes", "Lifetime", "Count", "s", "strain_lifetime.pdf"], [ "GetAverageDistances", "Average distance from SPB", "Count", "nm", "strain_avg_pos.pdf", ], ) # Special x limits # xmaxes = { # 'Run displacement': 1.6, # 'Velocity': 100.0, # 'Lifetime': 1.6, # 'Average distance from SPB': 8.0, # } # if self.config['paperFigure'] == 5: # xmaxes['Velocity'] = 60.0 xmaxes = { "Run displacement": 1600, "Velocity": 100.0, "Lifetime": 100, "Average distance from SPB": 8000, } ymax_scaling = { "Run displacement": 2000, "Velocity": 50, "Lifetime": 100, "Average distance from SPB": 10000, } if self.config["paperFigure"] == 5: xmaxes["Velocity"] = 60.0 nStrain = len(self.strains) for fcn, xlab, ylab, unit, figname in graphhistvars: # Make a figure. 
Two axes (one for poleward, one for antipoleward) fig, ax = plt.subplots(figsize=(6, 3)) cols1 = [cstrain.color for cstrain in self.strains] cols2 = [cstrain.color for cstrain in self.strains] # cols1 = [[68, 111, 200],[220, 95, 60]] # cols1 = [tuple(np.array(x)/255) for x in cols1] # cols2 = [[68, 111, 200],[220, 95, 60]] # cols2 = [tuple(np.array(x)/255) for x in cols2] # list for medians medians = {} original_stdout = ( sys.stdout ) # Save a reference to the original standard output # Save to stats print(os.getcwd()) with open("stats.txt", "a") as f: sys.stdout = f # Change the standard output to the file we created. print("-" * 30) print("\nParameter = {0}".format(xlab)) sys.stdout = original_stdout # Reset the standard output to its original value # Display # Make histograms for each strain for strain, col1, col2 in zip(self.strains, cols1, cols2): # Get data funcData = getattr(strain, fcn) dataPAP = funcData() dataAll = np.hstack((-1 * np.array(dataPAP[0]), dataPAP[1])) # bins and histogram nbins = 16 bins = np.linspace(-1 * xmaxes[xlab], xmaxes[xlab], nbins + 1) # ax.hist( dataAll, bins, density=True, edgecolor='k', alpha=0.6, color = col, label='{0} (N={1})'.format(strain.label, len(dataAll))) if self.config["paperFigure"] == 7: print("Skip WT histograms for TD cell") else: _, _, patches = ax.hist( dataAll, bins, density=True, edgecolor="white", linewidth=1.0, alpha=0.6, color=col2, ) for i in range(0, int(nbins / 2)): patches[i].set_facecolor(col1) patches[i].set_hatch("////") # Draw y-axis in middle ax.axvline(x=0, c="black", lw=1.5) # Add medians info medians[strain.label] = dict( zip( ["P", "AP"], [fac * np.median(db) for db, fac in zip(dataPAP, [-1, 1])], ) ) # ax.hist( [], bins, edgecolor='white', linewidth=1.0, alpha=0.6, color = col2, label='{0} (N={1})'.format(strain.label, len(dataAll))) ax.hist( [], bins, edgecolor="white", linewidth=1.0, alpha=0.6, color=col2, label="{0}".format(strain.label), ) # Print Info # Save to stats with 
open("stats.txt", "a") as f: sys.stdout = f # Change the standard output to the file we created. print("Strain: {0}".format(strain.label)) print("Poleward:") print("\tN = {0}".format(len(dataPAP[0]))) print("\tMedian = {0:.3f} {1}".format(np.median(dataPAP[0]), unit)) print("\tMean = {0:.3f} {1}".format(np.mean(dataPAP[0]), unit)) print( "\tStandard Dev = {0:.3f} {1}".format(np.std(dataPAP[0]), unit) ) print( "\tStandard Error = {0:.3f} {1}".format( np.std(dataPAP[0]) / np.sqrt(len(dataPAP[0])), unit ) ) print("Antipoleward:") print("\tN = {0}".format(len(dataPAP[1]))) print("\tMedian = {0:.3f} {1}".format(np.median(dataPAP[1]), unit)) print("\tMean = {0:.3f} {1}".format(np.mean(dataPAP[1]), unit)) print( "\tStandard Dev = {0:.3f} {1}".format(np.std(dataPAP[1]), unit) ) print( "\tStandard Error = {0:.3f} {1}".format( np.std(dataPAP[1]) / np.sqrt(len(dataPAP[1])), unit ) ) sys.stdout = original_stdout # Reset the standard output to its original value # Display print("-" * 30) # Get max y value (ceiled to the nearest .01) ytop = 1.25 * max([pp.get_height() for pp in ax.patches]) ymax = math.ceil(ytop * ymax_scaling[xlab]) / (ymax_scaling[xlab]) ax.set_yticks([0, ymax / 2, ymax]) ax.set(xlabel="{0} ({1})".format(xlab, unit)) # Limits and ticks ax.set_xlim(left=-1 * xmaxes[xlab], right=xmaxes[xlab]) ax.set_ylim(bottom=0, top=1.0 * ymax) # Plot medians for strain_name, col in zip(medians.keys(), cols1): meds = medians[strain_name].values() for med in meds: ax = plot_median_special(ax, med, 0.9, col) # Legend if nStrain > 1 and xlab == "Velocity": ax.legend(frameon=False, loc="upper left") # ax.legend(frameon=False) # Set ylabel ax.set(ylabel="Probability density") # XLABELS if xlab == "Lifetime": ax.set_xticks([-100, -50, 0, 50, 100]) ax.set_xticklabels(np.abs(ax.get_xticks())) elif xlab == "Velocity": if self.config["paperFigure"] == 5: ax.set_xticks([-60, -30, 0, 30, 60]) else: ax.set_xticks([-100, -50, 0, 50, 100]) elif xlab == "Average distance from SPB": 
ax.set_xticks([-8000, -4000, 0, 4000, 8000]) ax.ticklabel_format( style="sci", axis="y", scilimits=(0, 0), useMathText=True ) ax.set_xticklabels(np.abs(ax.get_xticks())) elif xlab == "Run displacement": ax.set_xticks([-1600, -800, 0, 800, 1600]) ax.ticklabel_format( style="sci", axis="y", scilimits=(0, 0), useMathText=True ) plt.tight_layout() plt.savefig(figname) plt.close() # }}} # GraphStrain_SwitchFrequency2 {{{ def GraphStrain_SwitchFrequency2(self, figname="graph_switch_frequency.pdf"): # Graph comparison bar plot for events per minute strains = [strain.label for strain in self.strains] # Data n_events = np.zeros((len(self.strains), 2)) dt = 0 * n_events for idx, strain in enumerate(self.strains): events, times = strain.GetSwitchFrequencyPerMinutePerCell() for je, jt in zip(events["P"], times["P"]): n_events[idx, 0] += je dt[idx, 0] += jt * 60 for je, jt in zip(events["AP"], times["AP"]): n_events[idx, 1] += je dt[idx, 1] += jt * 60 events_per_min = n_events / dt events_per_min_err = np.sqrt(n_events) / dt df = pd.DataFrame( {"Poleward": events_per_min[:, 0], "AntiPoleward": events_per_min[:, 1]}, index=strains, ) # Plot fig, ax = plt.subplots(figsize=(4, 3)) ax = df.plot( kind="bar", ax=ax, color=["Green", "Red"], rot=0, yerr=events_per_min_err, error_kw=dict(ecolor="k"), legend=False, ) ax.set_ylabel("Switching frequency\n(events/sec)") ax.set_xlabel("") num_cells = [0, 0] for idx in range(len(strains)): for kymo in self.strains[idx].kymographs: if kymo.poles != []: tt = kymo.poles[0].time[-1] - kymo.poles[0].time[0] if tt > 10: num_cells[idx] += 1 original_stdout = sys.stdout # Save a reference to the original standard output with open("stats.txt", "a") as f: sys.stdout = f # Change the standard output to the file we created. 
print("------------------------------") print("\nSwitching Frequency\n") for idx, strain in enumerate(strains): print("Strain: {0}".format(strain)) print(" Num Cells: {0}".format(num_cells[idx])) print(" Poleward Exit") print(" N Events: {0}".format(n_events[idx, 0])) print(" Total Time: {0:.3f}".format(dt[idx, 0])) print( " Switching Freq: {0:.5f} sec^-1".format( events_per_min[idx, 0] ) ) print( " Error in switching freq: {0:.5f} sec^-1".format( events_per_min_err[idx, 0] ) ) print(" AntiPoleward") print(" N Events: {0}".format(n_events[idx, 1])) print(" Total Time: {0:.3f}".format(dt[idx, 1])) print( " Switching Freq: {0:.5f} sec^-1".format( events_per_min[idx, 1] ) ) print( " Error in switching freq: {0:.5f} sec^-1".format( events_per_min_err[idx, 1] ) ) print("------------------------------") sys.stdout = original_stdout # Reset the standard output to its original value # Display # Set y axis limit and ticks (ceil to nearest 0.02) try: if self.config["paperFigure"] == 2: # ax.set_ylim(top=2.4) ax.set_ylim(top=0.024) elif self.config["paperFigure"] == 3: # ax.set_ylim(top=5.0) ax.set_ylim(top=0.06) elif self.config["paperFigure"] == 4: # ax.set_ylim(top=5.0) ax.set_ylim(top=0.06) elif self.config["paperFigure"] == 5: # ax.set_ylim(top=2.4) ax.set_ylim(top=0.024) else: raise exception("unkown value for paperfigure parameter") ymax = ax.get_ylim()[1] except: ymax = np.max((data[:, :2] + data[:, 2:]).flatten()) # ymax = math.ceil(ax.get_ylim()[1]*50)/50 ymax = math.ceil(ymax * 50) / 50 ax.set_ylim(top=1.5 * ymax) ax.set_yticks([0, ymax / 2, ymax]) ax.set_ylim(bottom=0.0) ax.set(xlabel=None) # Scientific notation ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0), useMathText=True) # Set custom patch colors (Poleward_strain1, Poleward_strain2, AntiP_streain1, AntiP_strain2) if len(self.strains) == 1: c1 = self.strains[0].color # c1 = [68, 111, 200] cols = [c1, c1] # cols = [tuple(np.array(x)/255) for x in cols] for idx, (pp, col) in 
enumerate(zip(ax.patches, cols)): pp.set_facecolor(col) pp.set_alpha(0.6) pp.set_edgecolor("white") if idx < len(strains): pp.set_hatch("////") cols = [c1, c1] cols = [tuple(np.array(x) / 255) for x in cols] labels = ["Poleward", "Antipoleward"] hatching = ["////", ""] handles = [ matplotlib.patches.Rectangle( (0, 0), 1, 1, facecolor=cols[idx], alpha=0.6, label=labels[idx], hatch=hatching[idx], edgecolor="white", ) for idx in range(len(labels)) ] elif len(self.strains) == 2: c1 = self.strains[0].color c2 = self.strains[1].color # c1 = [68, 111, 200] # c2 = [220, 95, 60] cols = [c1, c2, c1, c2] # cols = [tuple(np.array(x)/255) for x in cols] for idx, (pp, col) in enumerate(zip(ax.patches, cols)): pp.set_facecolor(col) pp.set_alpha(0.6) pp.set_edgecolor("white") if idx < len(strains): pp.set_hatch("////") cols = [c1, c1, c2, c2] # cols = [tuple(np.array(x)/255) for x in cols] labels = ["Poleward", "Antipoleward", "Poleward", "Antipoleward"] hatching = ["////", "", "////", ""] handles = [ matplotlib.patches.Rectangle( (0, 0), 1, 1, facecolor=cols[idx], alpha=0.6, label=labels[idx], hatch=hatching[idx], edgecolor="white", ) for idx in range(len(labels)) ] else: raise Exception("only coded for 1 or 2 strains") # ax.legend(handles, labels, loc='upper left', frameon=False) ax.legend("", frameon=False) plt.tight_layout() fig.savefig(figname) plt.close() # }}} # GraphStrain_EventsPerMinute2 {{{ def GraphStrain_EventsPerMinute2(self, figname="graph_events_per_second.pdf"): # Graph comparison bar plot for events per minute strains = [strain.label for strain in self.strains] # Data n_events = np.zeros((len(self.strains), 2)) dt = 0 * n_events for idx, strain in enumerate(self.strains): events, times = strain.GetDirectionalEventsPerMinutePerCell() for je, jt in zip(events["P"], times["P"]): n_events[idx, 0] += je # convert times to seconds dt[idx, 0] += jt * 60 for je, jt in zip(events["AP"], times["AP"]): n_events[idx, 1] += je dt[idx, 1] += jt * 60 events_per_min = n_events / 
dt events_per_min_err = np.sqrt(n_events) / dt df = pd.DataFrame( {"Poleward": events_per_min[:, 0], "AntiPoleward": events_per_min[:, 1]}, index=strains, ) # Plot fig, ax = plt.subplots(figsize=(4, 3)) ax = df.plot( kind="bar", ax=ax, color=["Green", "Red"], rot=0, yerr=events_per_min_err, error_kw=dict(ecolor="k"), legend=False, ) ax.set_ylabel("Directional events \n per second") ax.set_xlabel("") num_cells = [0, 0] for idx in range(len(strains)): for kymo in self.strains[idx].kymographs: if kymo.poles != []: tt = kymo.poles[0].time[-1] - kymo.poles[0].time[0] if tt > 10: num_cells[idx] += 1 original_stdout = sys.stdout # Save a reference to the original standard output with open("stats.txt", "a") as f: sys.stdout = f # Change the standard output to the file we created. print("------------------------------") print("\nEvents per second\n") for idx, strain in enumerate(strains): print("Strain: {0}".format(strain)) print(" Num Cells: {0}".format(num_cells[idx])) print(" Poleward") print(" N Events: {0}".format(n_events[idx, 0])) print(" Total Time: {0:.3f} sec".format(dt[idx, 0])) print( " Events per sec: {0:.5f} sec^-1".format( events_per_min[idx, 0] ) ) print( " Error in events per sec: {0:.5f} sec^-1".format( events_per_min_err[idx, 0] ) ) print(" AntiPoleward") print(" N Events: {0}".format(n_events[idx, 1])) print(" Total Time: {0:.3f} sec".format(dt[idx, 1])) print( " Events per sec: {0:.5f} sec^-1".format( events_per_min[idx, 1] ) ) print( " Error in events per sec: {0:.5f} sec^-1".format( events_per_min_err[idx, 1] ) ) print("------------------------------") sys.stdout = original_stdout # Reset the standard output to its original value # Display # Set y axis limit and ticks (ceil to nearest 0.02) try: if self.config["paperFigure"] == 2: # ax.set_ylim(top=0.8) ax.set_ylim(top=0.014) elif self.config["paperFigure"] == 3: # ax.set_ylim(top=1.8) ax.set_ylim(top=0.03) elif self.config["paperFigure"] == 4: # ax.set_ylim(top=1.8) ax.set_ylim(top=0.03) elif 
self.config["paperFigure"] == 5: # ax.set_ylim(top=0.8) ax.set_ylim(top=0.014) else: raise exception("unkown value for paperfigure parameter") ymax = ax.get_ylim()[1] except: ymax = np.max((data[:, :2] + data[:, 2:]).flatten()) # ymax = math.ceil(ax.get_ylim()[1]*50)/50 ymax = math.ceil(ymax * 50) / 50 ax.set_ylim(top=1.5 * ymax) ax.set_yticks([0, ymax / 2, ymax]) ax.set_ylim(bottom=0.0) ax.set(xlabel=None) # Scientific notation ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0), useMathText=True) if len(self.strains) == 1: c1 = self.strains[0].color # c1 = [68, 111, 200] cols = [c1, c1] # cols = [tuple(np.array(x)/255) for x in cols] for idx, (pp, col) in enumerate(zip(ax.patches, cols)): pp.set_facecolor(col) pp.set_alpha(0.6) pp.set_edgecolor("white") if idx < len(strains): pp.set_hatch("////") cols = [c1, c1] cols = [tuple(np.array(x) / 255) for x in cols] labels = ["Poleward", "Antipoleward"] hatching = ["////", ""] handles = [ matplotlib.patches.Rectangle( (0, 0), 1, 1, facecolor=cols[idx], alpha=0.6, label=labels[idx], hatch=hatching[idx], edgecolor="white", ) for idx in range(len(labels)) ] elif len(self.strains) == 2: c1 = self.strains[0].color c2 = self.strains[1].color # c1 = [68, 111, 200] # c2 = [220, 95, 60] cols = [c1, c2, c1, c2] # cols = [tuple(np.array(x)/255) for x in cols] for idx, (pp, col) in enumerate(zip(ax.patches, cols)): pp.set_facecolor(col) pp.set_alpha(0.6) pp.set_edgecolor("white") if idx < len(strains): pp.set_hatch("////") cols = [c1, c1, c2, c2] # cols = [tuple(np.array(x)/255) for x in cols] labels = ["Poleward", "Antipoleward", "Poleward", "Antipoleward"] hatching = ["////", "", "////", ""] handles = [ matplotlib.patches.Rectangle( (0, 0), 1, 1, facecolor=cols[idx], alpha=0.6, label=labels[idx], hatch=hatching[idx], edgecolor="white", ) for idx in range(len(labels)) ] else: raise Exception("only coded for 1 or 2 strains") ax.legend(handles, labels, loc="upper left", frameon=False) plt.tight_layout() fig.savefig(figname) 
plt.close() # }}} # GraphStrain_FractionMovement {{{ def GraphStrain_FractionMovement(self, figname="graph_fraction_kymo_movement.pdf"): # Graph comparison bar plot for events per minute fracMove = [ strain.GetFractionKymographsWithMovement() for strain in self.strains ] n_total = [len(strain.kymographs) for strain in self.strains] n_move = [int(jp * np) for jp, np in zip(fracMove, n_total)] strains = [strain.label for strain in self.strains] # Colors cols1 = [cstrain.color for cstrain in self.strains] num_cells = [0, 0] for idx in range(len(strains)): for kymo in self.strains[idx].kymographs: if kymo.poles != []: tt = kymo.poles[0].time[-1] - kymo.poles[0].time[0] if tt > 10: num_cells[idx] += 1 original_stdout = sys.stdout # Save a reference to the original standard output with open("stats.txt", "a") as f: sys.stdout = f # Change the standard output to the file we created. print("------------------------------") print("\nFraction kymograph movement\n\n") for idx, strain in enumerate(strains): print(" Strain: {0}".format(strain)) print(" Percentage: {0:.3f}".format(fracMove[idx])) print(" N: {0}\n".format(num_cells[idx])) print("------------------------------") sys.stdout = original_stdout # Reset the standard output to its original value # Display # Bar plot fig, ax = plt.subplots(figsize=(4, 3)) ax.bar(strains, fracMove, color=cols1, width=0.5, alpha=0.6) # ax.set_xlabel("Strain") ax.set_ylabel("Fraction of cells\nwith movement") ax.set_ylim(top=1.0) ax.set_yticks([0.0, 0.5, 1.0]) ax.set_xlim(left=-0.75, right=len(strains) - 1 + 0.75) handles = [ plt.Rectangle((0, 0), 1, 1, color=cols1[idx], alpha=0.6) for idx in range(len(strains)) ] # plt.legend(handles, strains, loc='upper left', frameon=False) plt.tight_layout() fig.savefig(figname) plt.close() # }}} # GraphStrainMedianValues{{{ def GraphStrainMedianValues(self, figname="graph_median_lifetime.pdf"): # Graph comparison bar plot for median lifetime graphhistvars = ( [ "GetRunLengths", "Run displacement", 
r"$\mu$" + "m", "strain_median_runlength.pdf", ], [ "GetVelocities_nm_per_sec", "Velocity", "nm/s", "strain_median_velocity.pdf", ], ["GetLifetimes_min", "Lifetime", r"min", "strain_median_lifetime.pdf"], [ "GetAverageDistances", "Average distance from SPB", r"$\mu$" + "m", "strain_median_avg_pos.pdf", ], ) for fcn, ylab, unit, figname in graphhistvars: # Data # Row: strains # Col: Poleward Mean, AntiPoleward Mean, Poleward STD, Antipoleward STD data = np.zeros((len(self.strains), 4)) count = np.zeros(len(self.strains)) for idx, strain in enumerate(self.strains): funcData = getattr(strain, fcn) events = funcData() count[idx] = len(strain.kymographs) data[idx, 0] = np.mean(events[0]) data[idx, 2] = np.std(events[0]) / np.sqrt(count[idx]) data[idx, 1] = np.mean(events[1]) data[idx, 3] = np.std(events[1]) / np.sqrt(count[idx]) strains = [strain.label for strain in self.strains] # Create pd Dataframe for plotting df = pd.DataFrame( data, columns=["Poleward", "Antipoleward", "std_P", "std_AP"], index=strains, ) # Plot fig, ax = plt.subplots(figsize=(4, 3)) # convert the std columns to an array yerr = df[["std_P", "std_AP"]].to_numpy().T ax = df[["Poleward", "Antipoleward"]].plot( kind="bar", ax=ax, color=["Green", "Red"], rot=0, # yerr=yerr, error_kw=dict(ecolor='k'),legend=False, xlabel=None) legend=False, xlabel=None, ) # ax.set_xlabel("Strain") ax.set_ylabel("Median\n{0}\n({1})".format(ylab, unit)) # Set y axis limit and ticks (ceil to nearest 0.02) if ylab == "Velocity": # nearest 4 ymax = np.max((data[:, :2] + data[:, 2:]).flatten()) ymax = math.ceil(ymax / 4) * 4 else: ymax = np.max((data[:, :2] + data[:, 2:]).flatten()) # ymax = math.ceil(ax.get_ylim()[1]*50)/50 ymax = math.ceil(ymax * 50) / 50 ax.set_ylim(top=1.4 * ymax) ax.set_yticks([0, ymax / 2, ymax]) # for jj in range(2): # ax.text(jj, ymax, 'N cells = {0}'.format(count[jj]), # ha='center', color='black', fontsize=8) # Set custom patch colors (Poleward_strain1, Poleward_strain2, AntiP_streain1, 
            # AntiP_strain2)
            if len(self.strains) == 1:
                c1 = self.strains[0].color
                # c1 = [68, 111, 200]
                cols = [c1, c1]
                # cols = [tuple(np.array(x)/255) for x in cols]
                for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
                    pp.set_facecolor(col)
                    pp.set_alpha(0.6)
                    pp.set_edgecolor("white")
                    # Hatch only the poleward bars (first len(strains) patches).
                    if idx < len(strains):
                        pp.set_hatch("////")
                cols = [c1, c1]
                # cols = [tuple(np.array(x)/255) for x in cols]
                labels = ["Poleward", "Antipoleward"]
                hatching = ["////", ""]
                handles = [
                    matplotlib.patches.Rectangle(
                        (0, 0),
                        1,
                        1,
                        facecolor=cols[idx],
                        alpha=0.6,
                        label=labels[idx],
                        hatch=hatching[idx],
                        edgecolor="white",
                    )
                    for idx in range(len(labels))
                ]
            elif len(self.strains) == 2:
                c1 = self.strains[0].color
                c2 = self.strains[1].color
                # c1 = [68, 111, 200]
                # c2 = [220, 95, 60]
                cols = [c1, c2, c1, c2]
                # cols = [tuple(np.array(x)/255) for x in cols]
                for idx, (pp, col) in enumerate(zip(ax.patches, cols)):
                    pp.set_facecolor(col)
                    pp.set_alpha(0.6)
                    pp.set_edgecolor("white")
                    if idx < len(strains):
                        pp.set_hatch("////")
                cols = [c1, c1, c2, c2]
                # cols = [tuple(np.array(x)/255) for x in cols]
                labels = [
                    "Poleward, {0}".format(strains[0]),
                    "Antipoleward, {0}".format(strains[0]),
                    "Poleward, {0}".format(strains[1]),
                    "Antipoleward, {0}".format(strains[1]),
                ]
                hatching = ["////", "", "////", ""]
                handles = [
                    matplotlib.patches.Rectangle(
                        (0, 0),
                        1,
                        1,
                        facecolor=cols[idx],
                        alpha=0.6,
                        label=labels[idx],
                        hatch=hatching[idx],
                        edgecolor="white",
                    )
                    for idx in range(len(labels))
                ]
            else:
                raise Exception("only coded for 1 or 2 strains")
            ax.legend(handles, labels, loc="upper left", frameon=False)
            plt.tight_layout()
            fig.savefig(figname)
            plt.close()
    # }}}

    # GraphStrain_AvgStartEnd {{{
    def GraphStrain_AvgStartEnd(self, figname="graph_fraction_kymo_movement.pdf"):
        """Bar-plot the fraction of cells with movement for each strain.

        NOTE(review): despite its name, this plots
        GetFractionKymographsWithMovement(), same quantity as
        GraphStrain_FractionMovement — confirm intent.
        """
        freqP = [strain.GetFractionKymographsWithMovement() for strain in self.strains]
        strains = [strain.label for strain in self.strains]
        # Create pd Dataframe for plotting
        seriesP = pd.Series(freqP, index=strains)
        # Plot
        fig, ax = plt.subplots(figsize=(6, 4))
        df = pd.DataFrame({"Movements": seriesP})
        df.plot.bar(ax=ax, color=["RebeccaPurple"], rot=0)
        ax.set_xlabel("Strain")
        ax.set_ylabel("Fraction of Cells\nwith Movement")
        plt.tight_layout()
        fig.savefig(figname)
        plt.close()
    # }}}

    # GraphStrainScatter {{{
    def GraphStrainScatter(
        self,
        strains,
        x,
        y,
        xlab=None,
        ylab=None,
        xunit="",
        yunit="",
        figname="scatter.pdf",
    ):
        """Scatter quantity *y* against quantity *x*, split into poleward and
        antipoleward panels, one color per strain.

        x, y : one of "Intensity", "Run displacement", "Velocity", "Lifetime".
        NOTE(review): the *figname* argument is ignored — the file is saved as
        "scatter_{x}_{y}.pdf"; confirm whether that is intended.
        """
        # Special x limits
        xmaxes = {
            "Run displacement": 1600,
            "Velocity": 60.0,
            "Lifetime": 80,
            "Intensity": 1000,
        }
        # 2 axes. Poleward and antipoleward
        fig, axs = plt.subplots(1, 2, figsize=(6, 3), sharey=True)
        cols = sns.color_palette("husl", len(strains))
        directions = ["Poleward", "Antipoleward"]
        for strain, c in zip(strains, cols):
            # Each getter returns a (poleward, antipoleward) pair of samples.
            if x == "Intensity":
                xx = strain.GetIntensities()
            elif x == "Run displacement":
                xx = strain.GetRunLengths()
            elif x == "Velocity":
                xx = strain.GetVelocities()
            elif x == "Lifetime":
                xx = strain.GetLifetimes()
            # elif x == "Average distance from SPB":
            #     xx = strain.GetAverageDistances()
            if y == "Intensity":
                yy = strain.GetIntensities()
            elif y == "Run displacement":
                yy = strain.GetRunLengths()
            elif y == "Velocity":
                yy = strain.GetVelocities()
            elif y == "Lifetime":
                yy = strain.GetLifetimes()
            # elif y == "Average distance from SPB":
            #     yy = strain.GetAverageDistances()
            for idx, ax in enumerate(axs):
                ax.scatter(
                    xx[idx],
                    yy[idx],
                    s=12,
                    alpha=0.8,
                    color=c,
                    edgecolors="none",
                    label=strain.label,
                )
                ax.set_title(directions[idx])
                # ax.grid(True)
        if xlab is not None:
            axs[0].set_xlabel("{0} ({1})".format(xlab, xunit))
            axs[1].set_xlabel("{0} ({1})".format(xlab, xunit))
        if ylab is not None:
            axs[0].set_ylabel("{0} ({1})".format(ylab, yunit))
        for ax in axs:
            ax.legend()
            ax.set_xlim(left=0, right=xmaxes[x])
            ax.set_ylim(bottom=0, top=xmaxes[y])
        plt.tight_layout()
        plt.savefig("scatter_{0}_{1}.pdf".format(x, y))
        plt.close()
    # }}}

    # GraphSpindleIntensity {{{
    def GraphSpindleIntensity(self, strains, lrange=[1, 10], gname=None):
        """Plot Cut7 intensity profiles along the normalized spindle axis.

        One figure per strain (all kymograph profiles in blue, mean in red)
        plus a final comparison figure of strain means.
        NOTE(review): if no kymograph yields an intensity profile,
        ``intensities`` stays None and the plotting loop below raises;
        the bare except only drops into pdb — confirm intended behavior.
        """
        # Graph spindle intensity between poles
        intStrain = np.zeros((len(strains), 100))
        cols = sns.color_palette("husl", len(strains))
        xran = np.linspace(0, 1, 100)
        for k, strain in enumerate(strains):
            fig, ax = plt.subplots(figsize=(9, 6))
            # Find spindle intensities for all kymographs
            intensities = None
            for i, kymo in enumerate(strain.kymographs):
                intense = kymo.FindIntensityAlongSpindle(lrange=lrange)
                if intense is not None:
                    if intensities is None:
                        intensities = np.mean(intense, axis=0)
                    else:
                        intensities = np.vstack((intensities, np.mean(intense, axis=0)))
            try:
                intStrain[k, :] = np.mean(intensities, axis=0)
            except:
                pdb.set_trace()
                print("1")
            # Plot
            for row in intensities:
                ax.plot(xran, row, color="blue")
            ax.plot(xran, np.mean(intensities, axis=0), color="red", linewidth=4)
            ax.set_ylabel("Intensity (AU)")
            ax.set_xlabel("Position along spindle (normalized)")
            ax.set_title("Cut7 intensity - {0}".format(strain.label))
            fig.savefig("spindle_intensity_{0}.pdf".format(strain.label))
            plt.close()
        # Make a comparison figure
        fig, ax = plt.subplots(figsize=(9, 6))
        for strn, row, c in zip(strains, intStrain, cols):
            ax.plot(xran, row, color=c, linewidth=4, label=strn.label)
        ax.set_ylabel("Intensity (AU)")
        ax.set_xlabel("Position along spindle (normalized)")
        ax.set_title("Cut7 intensity")
        ax.legend()
        figname = "spindle_intensity_all.pdf"
        if gname is not None:
            figname = figname[:-4] + "_{0}.pdf".format(gname)
            fig.suptitle(gname)
        fig.savefig(figname)
        plt.close()
    # }}}

    # PlotTracksByState {{{
    def PlotTracksByState(self, k=5):
        """Plot individual curved tracks (with poles) colored by motion state.

        k : forwarded to each strain's PlotTrackByStates.
        """
        # Plot individual curved tracks with poles
        # Plot all tracks overlayed without poles
        cols = {
            "Inactive": "blue",
            "Poleward": "green",
            "Antipoleward": "red",
        }
        for strain in self.strains:
            strain.PlotTrackByStates(cols, k=k)
    # }}}

    # PlotAllTracks {{{
    def PlotAllTracks(self):
        """Plot all tracks for every strain, colored by motion state."""
        # Plot all tracks
        cols = {
            "Inactive": "blue",
            "Poleward": "green",
            "Antipoleward": "red",
        }
        for strain in self.strains:
            strain.PlotAllTracks(cols)
    # }}}

    # DisplayTracksStatistics {{{
    def DisplayTracksStatistics(self):
        # Display statistics about the tracks
print("------------------------------------------") print("------------------------------------------") print("------------ Track Statistics ------------") print("------------------------------------------\n") print("Number of tracks:") for strain in self.strains: print(" {0} : {1}\n".format(strain.label, len(strain.tracks))) print("------------------------------------------") print("------------------------------------------") # }}} def weighted_avg_and_std(values, weights): """ Return the weighted average and standard deviation. values, weights -- Numpy ndarrays with the same shape. """ average = np.average(values, weights=weights) # Fast and numerically precise: variance = np.average((values - average) ** 2, weights=weights) std = np.sqrt(variance) serr = std / np.sqrt(len(values)) return average, std, serr ######################################################### if __name__ == "__main__": x = KymographAnalysis()
/src/Features.py
#!/usr/bin/env python
import os, pdb
import numpy as np
from scipy import interpolate, signal
from .node_graph import Graph
import matplotlib.pyplot as plt
import math
import uuid


# Superclass for Poles and tracks that stores positional and intensity information
class Feature:
    def __init__(self, time, position, intensity, strain='xxx', time_step=1):
        """Store time/position/intensity arrays and resample positions.

        time, position, intensity : array-like, one entry per frame.
        strain : strain label for bookkeeping.
        time_step : physical duration of one pixel/frame (time units).
        """
        self.time = np.array(time)
        self.position = np.array(position)
        self.intensity = np.array(intensity)
        # Unique identifier for this feature.
        self.id = uuid.uuid1()
        self.strain = strain
        self.time_step = time_step
        # Time expressed in pixel/frame units.
        self.pixel_time = self.time / self.time_step
        # Resample data
        self.ResampleData()

    def ResampleData(self, sample_factor=3):
        """Resample positions onto a uniform time grid.

        The grid has about sample_factor points per unit time (minimum 2).
        NOTE: intensity is not resampled, so it may no longer be aligned
        with time/position after this call.
        """
        # resample data based on time pixels
        # Define an interpolation function for positions
        ifunc_pos = interpolate.interp1d(self.time, self.position, kind='linear')
        # Define a grid of resampled time points.
        # FIX: np.linspace requires an integer `num`; the original passed the
        # float max([2, sample_factor*duration]), which raises TypeError on
        # modern NumPy. Truncate to int (matching NumPy's legacy behavior).
        self.time = np.linspace(
            self.time[0],
            self.time[-1],
            int(max([2, sample_factor * (self.time[-1] - self.time[0])])),
        )
        if len(self.time) == 1:
            pdb.set_trace()
            print('oops')
        self.position = ifunc_pos(self.time)


# Class for a Pole
class Pole(Feature):
    def __init__(self, time, position, intensity=[], time_step=1, strain='xxx'):
        # NOTE(review): mutable default `intensity=[]` — never mutated here
        # (converted via np.array in Feature.__init__), but fragile.
        Feature.__init__(self, time, position, intensity, strain=strain, time_step=time_step)
        # Define an interpolation/extrapolation function so pole position can
        # be evaluated at arbitrary times (including outside the sampled range).
        self.ifunc = interpolate.interp1d(
            self.time, self.position, kind='linear', fill_value='extrapolate'
        )

    def Print(self):
        """Print a human-readable dump of this pole."""
        print('Pole :')
        print(' ID : {}'.format(self.id))
        print(' Time : {}'.format(self.time))
        print(' Position : {}'.format(self.position))
        print(' Intensity : {}'.format(self.intensity))
        print('--------------------------------- ')


# Class for a Track: additionally stores associated poles and track direction
class Track(Feature):
    def __init__(self, time, position, intensity, poles, direction, line_type, time_step=1, strain='xxx'):
        Feature.__init__(self, time, position, intensity, time_step=time_step, strain=strain)
        # Debug trap: a time_step of 1 is unexpected here.
        if time_step == 1:
            pdb.set_trace()
            print('woah')
self.poles = poles self.direction = direction self.line_type = line_type self.polePosition = [] self.data = { 'pos_pole' : np.zeros( (2, np.size(self.position) ) ), 'pos_track_rel' : [], 'velocity' : { 'P' : [], 'AP' : [],'I': []}, 'runlength' : { 'P' : [], 'AP' : [],'I': []}, 'lifetime' : { 'P' : [], 'AP' : [],'I': []}, 'lifetime_total' : [], 'velocity_mean' : [], 'switch_count' : [], 'switch_total' : [], } self.bad = 0 # Order poles with 1st pole being main pole(closest at start) # self.OrderPoles() # Calcualte spindle length self.CalcSpindleLength() if self.line_type == 'Curve' and self.direction != 'Ambiguous': self.direction = 'Ambiguous' # pdb.set_trace() # print('1') def Analyze(self, ipole=0): # Run useful analysis methods self.CalcPositionTrackRelativeToPole() # Split the track and save analysis tracks_mini, switches = self.SplitTrack( ipole=ipole) for track in tracks_mini: if track.direction == 'Poleward': label = 'P' elif track.direction == 'Antipoleward': label = 'AP' elif track.direction == 'Inactive': label = 'I' else: pdb.set_trace() raise ValueError('line direction is neither poleward nor antipoleward nor inactive') # Calculate and append data of the mini track # Velocity self.data['velocity'][label] += [track.CalcVelocityLinear(ipole=ipole)] # Run length self.data['runlength'][label] += [track.CalcRunLength(ipole=ipole)] # Lifetime self.data['lifetime'][label] += [track.CalcLifetime()] # Combine data from the split tracks self.data['lifetime_total'] = self.CalcLifetime() self.data['velocity_mean'] = self.CalcVelocityMean(ipole=ipole) self.data['switch_count'] = switches # pdb.set_trace() # print('woah') def OrderPoles(self): # Order the poles with the first one being the closest one to the start of the track if len(self.poles) != 2: return pos = self.CalcPositionTrackRelativeToPole() if np.absolute( pos[1,0] ) < np.absolute( pos[0,0]): self.poles = [self.poles[1], self.poles[0]] def CalcSpindleLength(self): # Calculate the spindle length if 
len(self.poles) != 2: return # Find the distance between the poles for the extent of this track self.spindleLength = np.absolute( self.poles[0].ifunc( self.time) - self.poles[1].ifunc( self.time) ) def CheckViability(self): # Check if the track's time points are always increasing self.bad = 0 # Check track time is always increasing if np.any( np.diff( self.time) <= 0 ): self.bad = 1 return self.bad def CheckLinearLifetime( self, min_lt = 0.5): # Check lifetime is above a min threshold self.bad = 0 if self.line_type == 'Line' and self.CalcLifetime() < min_lt: self.bad = 1 return self.bad def CalcPositionPoleCurrent(self): # Get pole position at the current time (i.e at the times of the track) by using the interpolation/extrapolation function of the pole for idx, pole in enumerate( self.poles) : pos_pole = np.array(pole.ifunc( self.time) ) self.data['pos_pole'][idx,:] = pos_pole return self.data['pos_pole'] def CalcPositionTrackRelativeToPole(self): # Calculate track position relative to the pole pos_pole = self.CalcPositionPoleCurrent() # If bipolar spindle if len( self.poles) == 2: pos_track_rel = np.zeros( np.shape(pos_pole)) for idx,ele in enumerate( pos_pole): pos_track_rel[idx,:] = np.array( self.position - ele) # If monopolar spindle else: pos_track_rel = np.array( self.position - pos_pole ) # pos_track_rel = pos_track_rel[0,:] self.data['pos_track_rel'] = pos_track_rel return pos_track_rel def CalcVelocityLinear(self, ipole=0): # Calculate the velocity of this linear track if self.direction == 'Ambiguous': raise Exception('Track.CalcVelocityLinear() is only defined for tracks with a single direction') # Calc relative positions if not done already if len( self.data['pos_track_rel']) == 0 or not self.data['pos_track_rel'].any(): pos_track_rel = self.CalcPositionTrackRelativeToPole() else: pos_track_rel = self.data['pos_track_rel'] # Check if len(self.time) <= 1: pdb.set_trace() print('oops') # Find Velocity vel = np.average( np.absolute( np.divide( np.diff( 
pos_track_rel[ipole,:]) , np.diff( self.time) ) ), weights = np.diff(self.time) ) # Check if np.size( vel) > 1: pdb.set_trace() print('1') return vel def CalcRunLength(self, ipole=0): # Calculate the run length of this track # Calc relative positions if not done already if len( self.data['pos_track_rel']) == 0 or not self.data['pos_track_rel'].any(): pos_track_rel = self.CalcPositionTrackRelativeToPole() else: pos_track_rel = self.data['pos_track_rel'] # Find Run length run_length = np.absolute( pos_track_rel[ipole,-1] - pos_track_rel[ipole,0] ) self.data['run_length'] = run_length # Check if np.size( run_length) > 1: pdb.set_trace() print('1') return run_length def CalcLifetime(self): # Calculate the lifetime of this track lifetime = self.time[-1] - self.time[0] self.data['lifetime'] = lifetime return lifetime def CalcVelocityMean(self,ipole=0): # Calculate the mean velocity of this track if self.line_type == 'Curve' and not self.data['velocity']: # Split the track tracks_mini = self.SplitTrack() for track in tracks_mini: vv = track.CalcVelocityLinear() if track.direc == 'Poleward': self.data['velocity']['P'] += [ vv[ipole]] elif self.direc == 'Antipoleward': self.data['velocity']['AP'] += [ vv[ipole]] vel_mu = np.mean( np.concatenate( (self.data['velocity']['P'], self.data['velocity']['AP']) ) ) return vel_mu def CalcSwitchingCount(self): # Calculate the mean velocity of this track if self.line_type == 'Curve' and not self.data['velocity']: # Split the track tracks_mini = self.SplitTrack() for track in tracks_mini: if track.direc == 'Poleward': self.data['velocity']['P'] += [track.CalcVelocityLinear()] elif self.direc == 'Antipoleward': self.data['velocity']['AP'] += [track.CalcVelocityLinear()] vel_mu = np.mean( np.concat( self.data['velocity']['P'], self.data['velocity']['AP']) ) return vel_mu def CalcIntensityMean(self): # Calculate the mean intensity of this track self.data['intensity_mean'] = np.mean( self.intensity) def SplitTrack(self, ipole=0): # Spit 
curved track into multiple mini unidirectional tracks switches = { 'P' : { 'P' : 0, 'AP': 0, 'I' : 0,}, 'AP' : { 'P' : 0, 'AP': 0, 'I' : 0,}, 'I' : { 'P' : 0, 'AP': 0, 'I' : 0,}, } if self.direction != 'Ambiguous': return [self], switches if self.line_type == 'Line' and self.direction == 'Ambiguous': position = np.absolute( self.CalcPositionTrackRelativeToPole() ) vel = np.mean( np.divide( np.diff( position) , np.diff(self.time) ) ) if abs( vel) < 0.005: self.direction = 'Inactive' elif vel > 0: self.direction = 'Antipoleward' elif vel < 0: self.direction = 'Poleward' return [self], switches # Find track position relative to the pole if len( self.data['pos_track_rel']) == 0 or not self.data['pos_track_rel'].any(): position = self.CalcPositionTrackRelativeToPole() else: position = self.data['pos_track_rel'][ipole,:] position = np.absolute( position) states = [] # Smoothing window: # Use a time-sliding window to find the average velocity, and use that to figure out state def FindStates_RollingWindow( positions, times, t_window, v_cutoff=1): dt = np.mean( np.diff( times) ) n_hwindow = int( np.ceil( t_window / (2*dt)) ) states = [] for i, t in enumerate( times): i_min = max( [ 0, i-n_hwindow]) i_max = min( [ len(times), i+n_hwindow]) vel = np.mean( np.divide( np.diff( positions[i_min:i_max] ) , np.diff( times[i_min:i_max] ) ) ) # pdb.set_trace() # Assign labels based on value of vel if abs( vel) < v_cutoff: states += ['I'] elif vel > 0: states += ['AP'] elif vel < 0: states += ['P'] return states states = FindStates_RollingWindow(position,self.time,5,v_cutoff=0.005) # Remove singly occuring states for cnt, st in enumerate(states): if cnt > 1 and cnt < len(states)-1: if st != states[cnt-1] and st != states[cnt+1]: states[cnt] = states[cnt-1] # set first state to second state if cnt == 0: states[cnt] = states[cnt+1] # set last state to second last state if cnt == len(states)-1: states[cnt] = states[cnt-1] # Count switches and get track indices p_state = 'XXX' track = { 
'pos': [], 'time': [], 'dir':[] } idx = [0 , 0] for cnt, st in enumerate(states): if cnt == 0: p_state = st idx[0] = 0 continue if st == p_state: idx[1] += 1 if st != p_state: # store old stuff pos = self.position[ idx[0]: idx[1]+2] # pos.tolist() time = self.time[ idx[0]: idx[1]+2] # time.tolist() track['pos'] += [pos] track['time'] += [time] track['dir'] += [p_state] p_state = st # begin new idx[0] = cnt idx[1] = cnt # Store the last info if cnt == len(states)-1: pos = self.position[ idx[0]: idx[1]+1] # pos.tolist() time = self.time[ idx[0]: idx[1]+1] # time.tolist() track['pos'] += [pos] track['time'] += [time] track['dir'] += [p_state] # record switches for cnt, dd in enumerate( track['dir']): if cnt == 0: continue switches[ track['dir'][cnt-1]][track['dir'][cnt]] += 1 # Create track objects from the information mini_tracks = [] for time, pos, direc in zip( track['time'], track['pos'], track['dir']): if direc is 'P': direction = 'Poleward' elif direc is 'AP': direction = 'Antipoleward' elif direc is 'I': direction = 'Inactive' pos = pos.tolist() time = time.tolist() if len(pos) == 1: pdb.set_trace() print('oops') mini_tracks += [Track( time, pos, self.intensity, self.poles, direction, 'Line', time_step=self.time_step, strain=self.strain)] # if self.strain == 'B PA-GFP' and mini_tracks[0].direction == 'Inactive': # pdb.set_trace() # print('1') for t in mini_tracks: if len( t.position) < 2: pdb.set_trace() print('oops') return mini_tracks, switches def PlotCurveWithStates(self, figname='curved_track.pdf'): # Plot a curve with states( inactive, poleward and antipoleward) in different colors. 
cols = { 'Inactive' : 'blue', 'Poleward' : 'red', 'Antipoleward' : 'green', } minis, switches = self.SplitTrack() # Generate figure and axes and set colors fig = plt.figure( figsize=(6,4) ) ax = fig.add_subplot(111) pos_pole = self.CalcPositionPoleCurrent() for idx,pole in enumerate(self.poles): ax.plot( pos_pole[idx,:], self.time, linewidth=3 ) for trk in minis: ax.plot( trk.position, trk.time, linewidth=2, color=cols[trk.direction] ) plt.text(1,1, 'Poleward', color='red', transform=ax.transAxes, ha='right', va='top') plt.text(1,0.95, 'AntiPoleward', color='green', transform=ax.transAxes, ha='right', va='top') plt.text(1,0.9, 'Inactive', color='blue', transform=ax.transAxes, ha='right', va='top') plt.text(1,0.85, 'MainPole', color='skyblue', transform=ax.transAxes, ha='right', va='top') plt.text(1,0.8, 'SecondaryPole', color='orange', transform=ax.transAxes, ha='right', va='top') # Set axes limits axes = plt.gca() x_min = min([ min(self.position), min([min( pol.position) for pol in self.poles]) ]) -0.5 x_max = max([ max( self.position), max([max( pol.position) for pol in self.poles]) ]) +0.5 axes.set_xlim([ x_min, x_max]) axes.set_xlabel('Position') axes.set_ylim([ min(self.time)-5,max(self.time)+5]) axes.set_ylabel('Time') fig.savefig( figname) def Trim(self,lrange=[0,100]): # Trim the track to be inside the range specified if len( self.poles) == 1: return self # Get indices of times when spindle length is between the given range values lens = self.spindleLength idx = np.argwhere( (lens > lrange[0]) & (lens < lrange[1]) ).T[0].tolist() if len(idx) == 0: return None idx = range( idx[0], idx[-1]+1) # Create the new trimmed track tracknew = Track( self.time[idx], self.position[idx], self.intensity, self.poles, self.direction, self.line_type, time_step=self.time_step, strain=self.strain) return tracknew def Print(self): print('Feature :') print(' ID : {}'.format(self.id)) print(' Direction : {}'.format( self.direction)) print(' Line type : {}'.format( 
self.line_type)) print(' Time : {}'.format( self.time)) print(' Position : {}'.format( self.position)) print(' Intensity : {}'.format( self.intensity)) print('--------------------------------- ')
/src/Kymograph.py
#!/usr/bin/env python
import os, pdb
import math
import numpy as np
from scipy import interpolate
from .node_graph import Graph
import matplotlib.pyplot as plt
from .Features import *
from .ReadFiles import *

'''
Name: Kymograph.py
Description: Parses general, poles and feature information for a single
kymograph and stores the data accordingly
'''


class Kymograph:
    """Holds the general info, poles and tracks parsed from one kymograph
    track file."""

    def __init__(self, fname='example.txt'):
        # Label is the file path without its trailing 9 characters.
        # NOTE(review): magic slice -- confirm against the track-file naming
        # convention (same slice is used in ReadFiles.ParseGeneralInfo).
        self.label = fname[:-9]
        # Read file information.
        self.general, self.poles, self.tracks = ReadTxt(fname)
        # Remove tracks that run backward in time.
        self.RemoveBadTracks()
        # Merge tracks whose ends are close enough.
        # FIX: the original discarded MergeTracks' return value, so merging
        # silently had no effect; keep the merged list.
        self.tracks = self.MergeTracks(self.tracks)
        # Order track poles.
        for track in self.tracks:
            track.OrderPoles()
        # Trim tracks based on kmeans label
        # self.TrimTracksKmeansLabel()

    def RemoveBadTracks(self):
        """Drop tracks that fail their viability check (e.g. go backward in
        time)."""
        bad_tracks = []
        for track in self.tracks:
            if not track.CheckViability():
                bad_tracks += [track]
        if len(bad_tracks) != 0:
            print('Found some bad tracks')
        for track in bad_tracks:
            self.tracks.remove(track)

    def TrimBasedOnTime(self, time_keep=[-1, -1]):
        """Trim poles and tracks to the window ``time_keep``; features whose
        trim yields None or NaN are dropped."""
        # Trim poles.
        poles_new = []
        for pole in self.poles:
            trimmed = pole.TrimBasedOnTime(time_keep)
            if trimmed is not np.nan and trimmed is not None:
                poles_new.append(trimmed)
        self.poles = poles_new
        # Trim tracks.
        tracks_new = []
        for track in self.tracks:
            trimmed = track.TrimBasedOnTime(time_keep)
            if trimmed is not np.nan and trimmed is not None:
                tracks_new.append(trimmed)
        self.tracks = tracks_new

    def MergeTracks(self, tracks):
        """Merge linear tracks whose endpoints are close in space and time
        into single (possibly bidirectional) tracks.

        Tracks are nodes of a directed graph; an edge joins a track's end to
        its closest compatible continuation start, and each connected
        component is concatenated in order.  Returns the merged track list.
        """
        # Match window: +/- box_half_width in position, two time steps ahead.
        box_half_width = 0.15
        box_height = 2 * self.general['time_step']
        g = Graph(len(tracks))
        matches = [[] for i in range(len(tracks))]
        dist = [[] for i in range(len(tracks))]
        # For each track end, collect candidate continuation starts.
        for v, trackv in enumerate(tracks):
            for w, trackw in enumerate(tracks):
                if (trackv.position[-1] - box_half_width < trackw.position[0] < trackv.position[-1] + box_half_width) and \
                        (trackv.time[-1] < trackw.time[0] < trackv.time[-1] + box_height):
                    matches[v].append(w)
                    # Euclidean distance between the two endpoints.
                    t1 = [trackv.position[-1], trackv.time[-1]]
                    t2 = [trackw.position[0], trackw.time[0]]
                    dist[v].append(math.sqrt(((t1[0] - t2[0]) ** 2) + ((t1[1] - t2[1]) ** 2)))
        # Keep only the closest candidate per track.
        for v, trackv in enumerate(tracks):
            if len(matches[v]) == 0:
                continue
            w = matches[v][dist[v].index(min(dist[v]))]
            g.addEdge(v, w)
        # Connected components are the groups to merge.
        cc = g.connectedComponents()
        tracks_merged = []
        for comp in cc:
            time = None
            position = None
            if len(comp) == 1:
                line_type = tracks[comp[0]].line_type
                direction = tracks[comp[0]].direction
            else:
                # A merged track changes direction somewhere.
                line_type = 'Curve'
                direction = 'Ambiguous'
            for v in comp:
                if time is None:
                    time = tracks[v].time
                else:
                    time = np.concatenate((time, tracks[v].time))
                if position is None:
                    position = tracks[v].position
                else:
                    position = np.concatenate((position, tracks[v].position))
            tracks_merged += [Track(time, position, self.general['image'],
                                    self.poles, direction, line_type,
                                    time_step=self.general['time_step'],
                                    pos_step=self.tracks[0].pos_step)]
        return tracks_merged

    def PlotTracks(self, tracks, poles=[], figName='tracks.pdf'):
        """Plot the given tracks (and poles) and save the figure to
        ``figName``."""
        nt = len(tracks)
        # FIX: renamed from 'np', which shadowed the numpy alias.
        n_poles = len(poles)
        nn = nt + n_poles
        cm = plt.get_cmap('gist_rainbow')
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_subplot(111)
        ax.set_prop_cycle(color=[cm(1. * i / nn) for i in range(nt)])
        for idx, pole in enumerate(poles):
            ax.plot(pole.position, pole.time, linewidth=3, label='Pole {}'.format(1 + idx))
        for idx, track in enumerate(tracks):
            ax.plot(track.position, track.time, linewidth=2, label='Track {}'.format(1 + idx))
        plt.legend()
        # Axis limits padded around the joint extent of tracks and poles.
        time_max = max([max(trk.time) for trk in tracks] + [max(pol.time) for pol in poles])
        time_min = min([min(trk.time) for trk in tracks] + [min(pol.time) for pol in poles])
        x_max = max([max(trk.position) for trk in tracks] + [max(pol.position) for pol in poles]) + 0.5
        x_min = min([min(trk.position) for trk in tracks] + [min(pol.position) for pol in poles]) - 0.5
        axes = plt.gca()
        axes.set_xlim([x_min, x_max])
        axes.set_ylim([time_min, time_max])
        fig.savefig(figName)

    def FindIntensityAlongSpindle(self, lrange=[0, 10]):
        """Sample image intensity at 100 points between the two poles, for
        every time at which the spindle length lies inside ``lrange``.

        Returns an (n_times, 100) array, or None for non-bipolar kymographs
        or when no time satisfies the length range.
        """
        if len(self.poles) != 2:
            return None
        dimT = np.shape(self.general['image'])[0]
        dimX = np.shape(self.general['image'])[1]
        # 2-D interpolant of the kymograph image over (position, time).
        try:
            f = interpolate.interp2d(self.tracks[0].pos_step * np.arange(0, dimX),
                                     self.tracks[0].time_step * np.arange(0, dimT),
                                     self.general['image'])
        except Exception:  # FIX: was a bare 'except:'
            pdb.set_trace()
            print('1')
        # Times where both poles are defined.
        tStart = max(self.poles[0].time[0], self.poles[1].time[0])
        tEnd = min(self.poles[0].time[-1], self.poles[1].time[-1])
        tVec = np.linspace(tStart, tEnd,
                           math.ceil((tEnd - tStart) / self.tracks[0].time_step))
        # Pole positions at those times.
        pos0 = self.poles[0].ifunc(tVec)
        pos1 = self.poles[1].ifunc(tVec)
        # Keep only times whose pole separation lies inside lrange.
        pos0c = [i for i, j in zip(pos0, pos1) if np.abs(i - j) > lrange[0] and np.abs(i - j) < lrange[1]]
        pos1c = [j for i, j in zip(pos0, pos1) if np.abs(i - j) > lrange[0] and np.abs(i - j) < lrange[1]]
        tVecc = [k for i, j, k in zip(pos0, pos1, tVec) if np.abs(i - j) > lrange[0] and np.abs(i - j) < lrange[1]]
        if len(pos0c) == 0:
            return None
        # Intensity between the poles for each surviving time value.
        intense = np.zeros((len(tVecc), 100))
        for i, tt in enumerate(tVecc):
            pVec = np.linspace(pos0c[i], pos1c[i], 100)
            ttVec = tt * np.ones((100,))
            intense[i, :] = f(pVec, ttVec)[0, :]
        return intense

    def DisplayTracks(self, ax=None):
        """Overlay the tracks (in pixel units) on the kymograph image."""
        if ax is None:
            fig, ax = plt.subplots(figsize=(6, 6))
        ax.imshow(self.tracks[0].image)
        for track in self.tracks:
            ax.plot(track.position / track.pos_step,
                    track.time / track.time_step,
                    color='red', linewidth=3)
        plt.show()

    def Print(self):
        """Print a summary of this kymograph's poles and tracks."""
        print(' ')
        print(' path: {}'.format(self.general['path_tiff'][0:-1]))
        print(' name: {}'.format(self.label))
        print(' n_poles_exp: {}'.format(self.general['n_poles']))
        print(' n_poles_found: {}'.format(len(self.poles)))
        print(' n_tracks_exp: {}'.format(self.general['n_tracks']))
        print(' n_tracks_found: {}'.format(len(self.tracks)))
        print(' ')
        for feat in self.poles + self.tracks:
            feat.Print()


##########################################
if __name__ == "__main__":
    print('No default run method')
/src/Load.py
#!/usr/bin/env python
import os, pdb
import yaml
import glob

'''
Name: Load.py
Description: loads and splits the tracks saved by the trackBuilder
(kyman.mlapp) into general, poles and feature sections to be parsed by
Kymograph.py
'''


# Class to load data from files
class Load:
    """Loads the track files listed in 'track_files.yaml' and splits each
    file into general / poles / feature line sections."""

    def __init__(self, verbose=0):
        file_name = 'track_files.yaml'
        with open(file_name) as infile:
            # FIX: yaml.load without an explicit Loader is deprecated and can
            # execute arbitrary constructors; safe_load parses plain data.
            self.data = yaml.safe_load(infile)
        self.verbose = verbose
        self.GetFilenames()
        self.ReadFromFiles()

    def GetFilenames(self):
        """Expand filename patterns (special characters) in the yaml data via
        glob, replacing each pattern list with the concrete basenames."""
        for strain, dat in self.data['strain'].items():
            for idx, fpath in enumerate(dat['path']):
                files = []
                for fname in dat['files'][idx]:
                    temp = glob.glob(os.path.join(fpath, fname))
                    for fil in temp:
                        head_tail = os.path.split(fil)
                        files += [head_tail[1]]
                self.data['strain'][strain]['files'][idx] = files

    def ReadFromFiles(self):
        """Read and section every file referenced by the yaml data.

        NOTE(review): the info lists are re-initialized for each path index,
        so for strains with multiple paths only the last path's results
        survive -- confirm this is intended.  Behavior kept as-is.
        """
        for strain, dat in self.data['strain'].items():
            for idx, fpath in enumerate(dat['path']):
                self.data['strain'][strain]['geninfo'] = []
                self.data['strain'][strain]['polesinfo'] = []
                self.data['strain'][strain]['featureinfo'] = []
                for fname in dat['files'][idx]:
                    gen, poles, feats = self.ReadFromFile(fpath, fname)
                    self.data['strain'][strain]['geninfo'] += [gen]
                    self.data['strain'][strain]['polesinfo'] += [poles]
                    self.data['strain'][strain]['featureinfo'] += [feats]

    def ReadFromFile(self, fpath, fname):
        """Split one file into (geninfo, polesinfo, featureinfo) line lists,
        keyed on the 'General/Poles/Feature Information' section headers."""
        geninfo = []
        polesinfo = []
        featureinfo = []
        if self.verbose:
            # FIX: pass a full path; PrintFile previously relied on a
            # 'self.fpath' attribute that was never set.
            self.PrintFile(os.path.join(fpath, fname))
        # FIX: join the path components instead of raw string concatenation,
        # consistent with GetFilenames.
        with open(os.path.join(fpath, fname)) as fp:
            addLine = None
            for cnt, line in enumerate(fp):
                if line.find('General Information') > -1:
                    addLine = 'G'
                if line.find('Poles Information') > -1:
                    addLine = 'P'
                if line.find('Feature Information') > -1:
                    addLine = 'F'
                # Route the line to the currently open section.
                if addLine == 'G':
                    geninfo.append(line)
                elif addLine == 'P':
                    polesinfo.append(line)
                elif addLine == 'F':
                    featureinfo.append(line)
        return geninfo, polesinfo, featureinfo

    def PrintFile(self, fname):
        """Print the full contents of the file at ``fname``.

        FIX: the original opened ``self.fpath + fname`` but no ``fpath``
        attribute is ever set (AttributeError); callers now pass a full path.
        """
        with open(fname) as fp:
            print(fp.read())


##########################################
if __name__ == "__main__":
    x = Load(verbose=1)
/src/ReadFiles.py
#!/usr/bin/env python
import os, pdb
import math
import numpy as np
from .Track import *
import matplotlib.pyplot as plt


def ReadTxt(fname, verbose=0):
    """Read a kymograph .txt file and parse it into (general, poles, tracks)."""
    if verbose:
        PrintFile(fname)
    geninfo = []
    polesinfo = []
    featureinfo = []
    # Section the file by its 'General/Poles/Feature Information' headers.
    with open(fname) as fp:
        addLine = None
        for cnt, line in enumerate(fp):
            if line.find('General Information') > -1:
                addLine = 'G'
            if line.find('Poles Information') > -1:
                addLine = 'P'
            if line.find('Feature Information') > -1:
                addLine = 'F'
            # Route the line to the currently open section.
            if addLine == 'G':
                geninfo.append(line)
            elif addLine == 'P':
                polesinfo.append(line)
            elif addLine == 'F':
                featureinfo.append(line)
    # Parse the sections.
    general = ParseGeneralInfo(fname, geninfo)
    poles = ParsePolesInfo(polesinfo, general)
    tracks = ParseTracksInfo(featureinfo, poles, general)
    if polesinfo == []:
        # Debug guard left in place: a file with no poles section is
        # unexpected.
        pdb.set_trace()
    return general, poles, tracks


def ParseGeneralInfo(fname, geninfo):
    """Parse the 'General Information' lines into a dict, and load the
    matching tiff image."""
    general = {
        'path_tiff': [],
        'type': [],
        'time_start': [],
        'time_end': [],
        'time_step': [],
        'n_poles': [],
        'n_tracks': [],
        'image': [],
    }
    for line in geninfo:
        # Tiff path.
        path_tiff = FindSingleSubstring(line, 'Tiff path : ')
        if path_tiff is not None:
            general['path_tiff'] = path_tiff
        # Spindle type.
        typ = FindSingleSubstring(line, 'Spindle type : ')
        if typ is not None:
            general['type'] = typ
        # Start time.
        time_start = FindNumbers(line, 'Start time (s) : ')
        if time_start is not None:
            general['time_start'] = time_start
        # End time.
        time_end = FindNumbers(line, 'End time (s) : ')
        if time_end is not None:
            general['time_end'] = time_end
        # Time step (stored as a scalar).
        time_step = FindNumbers(line, 'Time step (s) : ')
        if time_step is not None:
            general['time_step'] = time_step[0]
        # Expected pole count.
        npoles = FindNumbers(line, 'Num poles : ')
        if npoles is not None:
            general['n_poles'] = int(npoles[0])
        # Expected track count.
        ntracks = FindNumbers(line, 'Num tracks : ')
        if ntracks is not None:
            general['n_tracks'] = int(ntracks[0])
    # NOTE(review): assumes the txt filename ends in a 9-character suffix that
    # is replaced by '.tif' -- confirm against the file naming convention.
    general['image'] = LoadTiff(fname[:-9] + '.tif')
    return general


def ParsePolesInfo(polesinfo, general):
    """Parse the 'Poles Information' lines into a list of Pole objects."""
    if not polesinfo or len(polesinfo) == 0:
        print('No poles information here')
        # FIX: return an empty list instead of the implicit None, so callers
        # can still iterate the result.
        return []
    # Split the lines per pole, keyed on the 'Pole number : N' headers.
    polelist = []
    nPoles = 0
    for line in polesinfo:
        # Look for the next pole.
        if line.find('Pole number : {}'.format(nPoles + 1)) > -1:
            nPoles += 1
        if nPoles == 0:
            continue
        if nPoles != len(polelist):
            polelist += [[line]]
        else:
            polelist[nPoles - 1] += [line]
    # For each split pole, extract the fields and build a Pole object.
    poles = []
    for pole in polelist:
        for line in pole:
            # Time pixels, converted to seconds via the time step.
            if FindNumbers(line, 'Time pixel : ') is not None:
                time = FindNumbers(line, 'Time pixel : ')
                time = [x * general['time_step'] for x in time]
            # Position (um).
            if FindNumbers(line, 'Position (um) : ') is not None:
                position = FindNumbers(line, 'Position (um) : ')
            # Intensity.
            if FindNumbers(line, 'Intensity : ') is not None:
                intensity = FindNumbers(line, 'Intensity : ')
        poles += [Pole(time, position, general['image'],
                       time_step=general['time_step'])]
    return poles


def ParseTracksInfo(featureinfo, poles, general):
    """Parse the 'Feature Information' lines into a list of Track objects."""
    if not featureinfo or len(featureinfo) == 0:
        print('No tracks information here')
        # FIX: return an empty list instead of the implicit None.
        return []
    # Split the lines per track, keyed on the 'Feature number : N' headers.
    tracklist = []
    nTracks = 0
    for line in featureinfo:
        # Look for the next track.
        if line.find('Feature number : {}'.format(nTracks + 1)) > -1:
            nTracks += 1
        if nTracks == 0:
            continue
        if nTracks != len(tracklist):
            tracklist += [[line]]
        else:
            tracklist[nTracks - 1] += [line]
    # For each split track, extract the fields and build a Track object.
    tracks = []
    for trck in tracklist:
        for line in trck:
            # Time pixels, converted to seconds via the time step.
            if FindNumbers(line, 'Time pixel : ') is not None:
                time = FindNumbers(line, 'Time pixel : ')
                timePix = time
                time = [x * general['time_step'] for x in time]
            # Position in pixels.
            if FindNumbers(line, 'Position pixel : ') is not None:
                positionPix = FindNumbers(line, 'Position pixel : ')
            # Position (um).
            if FindNumbers(line, 'Position (um) : ') is not None:
                position = FindNumbers(line, 'Position (um) : ')
            # Intensity.
            if FindNumbers(line, 'Intensity : ') is not None:
                intensity = FindNumbers(line, 'Intensity : ')
            # Direction (strip trailing newline).
            if FindSingleSubstring(line, 'Feature direction : ') is not None:
                direction = FindSingleSubstring(line, 'Feature direction : ')
                direction = direction[0:-1]
            # Line type (strip trailing newline).
            if FindSingleSubstring(line, 'Feature type : ') is not None:
                line_type = FindSingleSubstring(line, 'Feature type : ')
                line_type = line_type[0:-1]
        # NOTE(review): the parsed 'direction' is intentionally not used here;
        # every track starts out 'Ambiguous' and is classified later.
        tracks += [Track(time, position, general['image'], poles, 'Ambiguous',
                         line_type, time_step=general['time_step'],
                         pos_step=0.1067)]
    return tracks


def LoadTiff(fname):
    """Load a tiff file as a 2-D array, averaging channels of an RGB image."""
    arr = plt.imread(fname)
    if len(arr.shape) == 3:
        arr = np.mean(arr, axis=2)
    return arr


def FindSingleSubstring(strSearch, strLabel):
    """If ``strSearch`` contains ``strLabel``, return the string with the
    label removed; otherwise None."""
    if strSearch.find(strLabel) > -1:
        strMatch = strSearch.replace(strLabel, '')
        return strMatch
    return None


def FindNumbers(strSearch, strLabel):
    """If ``strSearch`` contains ``strLabel``, return the comma-separated
    floats that follow it as a list; otherwise None."""
    if strSearch.find(strLabel) > -1:
        strMatch = strSearch.replace(strLabel, '')
        strList = strMatch.split(',')
        nums = [float(i) for i in strList]
        return nums
    return None


def PrintFile(fname):
    """Print the entire contents of ``fname`` to screen."""
    with open(fname) as f:
        print(f.read())


##########################################
if __name__ == "__main__":
    print("no default implementation")
/src/Strain.py
#!/usr/bin/env python
import os, pdb
from .Load import Load
from .Kymograph import Kymograph
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import math, random
import pickle
from pathlib import Path
from scipy import interpolate, signal


class Strain:
    """Aggregates the kymographs of one strain and computes track statistics
    (velocities, run lengths, lifetimes, switch counts, ...)."""

    def __init__(self, trackpaths, label="xxx"):
        self.paths = trackpaths
        self.label = label
        self.LoadKymographs()
        self.tracks = []

    def LoadKymographs(self):
        """Initialize a Kymograph object for each loaded file path."""
        self.kymographs = []
        for pth in self.paths:
            print(pth)
            self.kymographs += [Kymograph(fname=pth)]

    def GetTracks(self, spindle_length=None):
        """Collect tracks from all kymographs, trimmed to the given spindle
        length range (or Trim's own default range when None)."""
        self.tracks = []
        for kymo in self.kymographs:
            for track in kymo.tracks:
                # FIX: passing lrange=None straight through to Trim crashes on
                # the range comparison; fall back to Trim's default range.
                if spindle_length is None:
                    trimmed = track.Trim()
                else:
                    trimmed = track.Trim(lrange=spindle_length)
                if trimmed is not None:
                    self.tracks += [trimmed]

    def TrimUsingKmeansLabel(self, kmean_label):
        """Trim each bipolar kymograph to its pre-anaphase-B (label 0) or
        anaphase-B (label 1) phase, using a pickled kmeans model fitted on
        spindle-elongation velocity."""
        # Load the kmeans model stored next to the track files.
        kmeans_path = Path(self.paths[0]).parent / "kmeans.pickle"
        with open(kmeans_path, "rb") as f:
            # NOTE(review): unpickling executes arbitrary code; only load
            # trusted model files.
            model = pickle.load(f)
        for kymo in self.kymographs:
            # Only bipolar kymographs have a spindle length to classify.
            if len(kymo.poles) == 1:
                continue
            # Resample time at ~1 s spacing over the poles' joint extent.
            time = np.array(
                sorted(np.hstack((kymo.poles[0].time, kymo.poles[1].time)))[1::10]
            )
            time = np.linspace(time[0], time[-1], int(np.ceil(time[-1] - time[0])))
            # Spindle length and its finite-difference velocity.
            clen = np.absolute(kymo.poles[1].ifunc(time) - kymo.poles[0].ifunc(time))
            cvel = list((clen[1:] - clen[:-1]) / (time[1:] - time[:-1]))
            cvel.insert(0, cvel[0])
            cvel = np.array(cvel).reshape(-1, 1)
            # Predict phase labels, smooth them, and force a single 0->1
            # transition.
            labels_raw = model.predict(cvel)
            labels = self.ForceLabelsOneWay(
                self.SmoothClassifiedLabels(labels_raw, span=100)
            )
            if np.max(labels) == 0 or np.max(clen) < 2:
                # Never reaches anaphase B.
                AB_transition = -1
                if kmean_label == 0:
                    time_keep = [time[0], time[-1]]
                elif kmean_label == 1:
                    time_keep = [-1, -1]
            elif np.min(labels) == 1:
                # Entirely inside anaphase B.
                AB_transition = -1
                if kmean_label == 0:
                    time_keep = [-1, -1]
                elif kmean_label == 1:
                    time_keep = [time[0], time[-1]]
            else:
                # Transition = first time labeled 1 with spindle length > 2 um.
                AB_transition = time[np.where((labels == 1) & (clen > 2))[0][0]]
                if kmean_label == 0:
                    time_keep = [time[0], AB_transition]
                elif kmean_label == 1:
                    time_keep = [AB_transition, time[-1]]
            print("Total time = {0:.2f} - {1:.2f}".format(time[0], time[-1]))
            print("Anaphase B = {0:.2f}".format(AB_transition))
            print("Kmeans Label = {0}".format(kmean_label))
            print("Time 2 keep = {0}".format(time_keep))
            kymo.TrimBasedOnTime(time_keep)

    def SmoothClassifiedLabels(self, label, span=100):
        """Smooth a 0/1 label sequence with a centered moving average (window
        shrunk near the edges) and re-threshold at 0.5."""
        def smooth_data(arr, span):
            re = np.convolve(arr, np.ones(span * 2 + 1) / (span * 2 + 1), mode="same")
            # Shrink the averaging window on the side that would reach past
            # the ends of the data.
            re[0] = np.average(arr[:span])
            for i in range(1, span + 1):
                re[i] = np.average(arr[: i + span])
                re[-i] = np.average(arr[-i - span:])
            return re

        label_new = np.where(
            np.array(smooth_data(label, min([span, int(len(label) / 2)]))) >= 0.5, 1, 0
        )
        return label_new

    def ForceLabelsOneWay(self, label):
        """Make labels monotonic: once a 1 appears, all later labels are 1."""
        labels = [np.max(label[: 1 + idx]) for idx in range(len(label))]
        return np.array(labels)

    def TossFarTracks(self, threshold):
        """Keep only tracks starting closer than ``threshold`` to the first
        pole, then refresh self.tracks."""
        self.tracks = []
        for kymo in self.kymographs:
            tracksKeep = []
            for track in kymo.tracks:
                if track.CalcPositionRelative()[0, 0] < threshold:
                    tracksKeep.append(track)
            kymo.tracks = tracksKeep
        self.GetTracks()

    def TossCloseTracks(self, threshold):
        """Keep only tracks starting farther than ``threshold`` from the
        first pole, then refresh self.tracks."""
        self.tracks = []
        for kymo in self.kymographs:
            tracksKeep = []
            for track in kymo.tracks:
                if track.CalcPositionRelative()[0, 0] > threshold:
                    tracksKeep.append(track)
            kymo.tracks = tracksKeep
        self.GetTracks()

    def GetSegmentsPAP(self):
        """Split every track into unidirectional segments and return
        [poleward_segments, antipoleward_segments], dropping segments that
        last fewer than two time steps."""
        segs = {"Poleward": [], "Antipoleward": []}
        bad_cnt = 0
        good_cnt = 0
        for track in self.tracks:
            segments, _ = track.SplitTrack()
            for seg in segments:
                if seg.time[-1] - seg.time[0] < 2 * seg.time_step:
                    bad_cnt += 1
                # FIX: compare strings with '==', not the identity operator
                # 'is' (which works only by CPython interning accident).
                elif seg.direction == "Poleward":
                    good_cnt += 1
                    segs["Poleward"] += [seg]
                elif seg.direction == "Antipoleward":
                    good_cnt += 1
                    segs["Antipoleward"] += [seg]
        return [segs["Poleward"], segs["Antipoleward"]]

    def FilterSegments(self, segments):
        """Placeholder: filter segments by velocity / run length / lifetime."""
        print("1")

    def GetRunLengths(self):
        """Run lengths (nm) of [poleward, antipoleward] segments."""
        segsPAP = self.GetSegmentsPAP()
        runlens = []
        for segs in segsPAP:
            runlen = [
                1000 * np.absolute(
                    seg.CalcPositionRelative()[0, -1] - seg.CalcPositionRelative()[0, 0]
                )
                for seg in segs
            ]
            runlens += [runlen]
        return runlens

    def GetVelocities(self):
        """Velocities (nm/s) of [poleward, antipoleward] segments."""
        segsPAP = self.GetSegmentsPAP()
        vels = []
        for segs in segsPAP:
            vel = [1000 * seg.CalcVelocity()[0] for seg in segs]
            vels += [vel]
        return vels

    def GetLifetimes(self):
        """Lifetimes (s) of [poleward, antipoleward] segments."""
        segsPAP = self.GetSegmentsPAP()
        lifes = []
        for segs in segsPAP:
            life = [seg.time[-1] - seg.time[0] for seg in segs]
            lifes += [life]
        return lifes

    def GetIntensities(self):
        """Intensities of [poleward, antipoleward] segments."""
        segsPAP = self.GetSegmentsPAP()
        ins = []
        for segs in segsPAP:
            inss = [seg.CalcIntensity() for seg in segs]
            ins += [inss]
        return ins

    def GetTotalSwitches(self):
        """Total number of switches out of each state ('P'/'AP'/'I') over all
        tracks."""
        switches = {"P": 0, "AP": 0, "I": 0}
        labs = ["P", "AP", "I"]
        for track in self.tracks:
            segments, trans = track.SplitTrack()
            for lab in labs:
                switches[lab] += sum([a for k, a in trans[lab].items()])
        return switches

    def GetFractionKymographsWithMovement(self):
        """Fraction of kymographs containing at least one non-inactive
        segment."""
        nMovement = 0
        for kymo in self.kymographs:
            nAdd = 0
            for track in kymo.tracks:
                trks_all, _ = track.SplitTrack()
                for mini in trks_all:
                    if mini.direction != "Inactive":
                        nAdd = 1
            nMovement += nAdd
        return nMovement / len(self.kymographs)

    def GetDirectionalEventsPerMinute(self):
        """Directional events per minute pooled over all kymographs; returns
        (events_per_min, events)."""
        events = {"P": 0, "AP": 0}
        events_per_min = {"P": 0, "AP": 0}
        for track in self.tracks:
            segs, _ = track.SplitTrack()
            for seg in segs:
                if seg.direction == "Poleward":
                    events["P"] += 1
                elif seg.direction == "Antipoleward":
                    events["AP"] += 1
        # Total observation time = sum of each kymograph's first-pole span.
        time_total = 0
        for kymo in self.kymographs:
            time_total += kymo.poles[0].time[-1] - kymo.poles[0].time[0]
        events_per_min["P"] = events["P"] / (time_total / 60)
        events_per_min["AP"] = events["AP"] / (time_total / 60)
        return events_per_min, events

    def GetDirectionalEventsPerMinutePerCell(self):
        """Per-cell directional event counts and observation times (minutes);
        returns (events, times)."""
        events = {"P": [], "AP": []}
        times = {"P": [], "AP": []}
        for kymo in self.kymographs:
            nP = 0
            nAP = 0
            for track in kymo.tracks:
                segs, _ = track.SplitTrack()
                for seg in segs:
                    if seg.direction == "Poleward":
                        nP += 1
                    elif seg.direction == "Antipoleward":
                        nAP += 1
            if nP + nAP > 0:
                time_total = kymo.poles[0].time[-1] - kymo.poles[0].time[0]
            else:
                # No events: sentinel time of 1 s so the cell is skipped below.
                time_total = 1
            if time_total > 1:
                events["P"].append(nP)
                events["AP"].append(nAP)
                times["P"].append(time_total / 60)
                times["AP"].append(time_total / 60)
        return events, times

    def GetTotalDirectionalTime(self):
        """Total time (s) spent in each direction over all tracks (seeded
        with 0.01 s to avoid division by zero downstream)."""
        times = {"P": 0.01, "AP": 0.01, "I": 0.01}
        for track in self.tracks:
            segs, _ = track.SplitTrack()
            for seg in segs:
                if seg.direction == "Poleward":
                    times["P"] += seg.time[-1] - seg.time[0]
                elif seg.direction == "Antipoleward":
                    times["AP"] += seg.time[-1] - seg.time[0]
                elif seg.direction == "Inactive":
                    times["I"] += seg.time[-1] - seg.time[0]
                else:
                    raise ValueError("what is this unknown line direction")
        return times

    def GetTotalDirectionalTimeMinutes(self):
        """Total time (minutes) spent in each direction over all tracks."""
        times = {"P": 0.01, "AP": 0.01, "I": 0.01}
        for track in self.tracks:
            segs, _ = track.SplitTrack()
            for seg in segs:
                if seg.direction == "Poleward":
                    times["P"] += (seg.time[-1] - seg.time[0]) / 60
                elif seg.direction == "Antipoleward":
                    times["AP"] += (seg.time[-1] - seg.time[0]) / 60
                elif seg.direction == "Inactive":
                    times["I"] += (seg.time[-1] - seg.time[0]) / 60
                else:
                    raise ValueError("what is this unknown line direction")
        return times

    def GetSwitchFrequencyPerMinutePerCell(self):
        """Per-cell switch counts and per-direction track times (minutes);
        returns (events, times_all)."""
        events = {"P": [], "AP": [], "I": []}
        times_all = {"P": [], "AP": [], "I": []}
        for kymo in self.kymographs:
            # Per-direction track time for this cell (tiny epsilon avoids a
            # division by zero downstream).
            times = {"P": 10 ** -7, "AP": 10 ** -7, "I": 10 ** -7}
            for track in kymo.tracks:
                segs, _ = track.SplitTrack()
                for seg in segs:
                    if seg.direction == "Poleward":
                        times["P"] += (seg.time[-1] - seg.time[0]) / 60
                    elif seg.direction == "Antipoleward":
                        times["AP"] += (seg.time[-1] - seg.time[0]) / 60
                    elif seg.direction == "Inactive":
                        times["I"] += (seg.time[-1] - seg.time[0]) / 60
                    else:
                        raise ValueError("what is this unknown line direction")
            # Total switches for this cell.
            switches = {"P": 0, "AP": 0, "I": 0}
            labs = ["P", "AP", "I"]
            for track in kymo.tracks:
                segments, trans = track.SplitTrack()
                for lab in labs:
                    switches[lab] += sum([a for k, a in trans[lab].items()])
            # Record this cell's counts and times.
            for lab in labs:
                events[lab].append(switches[lab])
                times_all[lab].append(times[lab])
        return events, times_all

    def GetStartDistances(self):
        """Start distances from the pole of [poleward, antipoleward]
        segments."""
        dist_P = []
        dist_AP = []
        for track in self.tracks:
            segs, _ = track.SplitTrack()
            for seg in segs:
                if seg.direction == "Poleward":
                    dist_P.append(seg.CalcPositionRelative()[0, 0])
                elif seg.direction == "Antipoleward":
                    dist_AP.append(seg.CalcPositionRelative()[0, 0])
        return [dist_P, dist_AP]

    def GetEndDistances(self):
        """End distances from the pole of [poleward, antipoleward]
        segments."""
        dist_P = []
        dist_AP = []
        for track in self.tracks:
            segs, _ = track.SplitTrack()
            for seg in segs:
                if seg.direction == "Poleward":
                    dist_P.append(seg.CalcPositionRelative()[0, -1])
                elif seg.direction == "Antipoleward":
                    dist_AP.append(seg.CalcPositionRelative()[0, -1])
        return [dist_P, dist_AP]

    def GetAverageDistances(self):
        """Pole distances (nm), resampled at the time step, for [poleward,
        antipoleward] segments."""
        segsPAP = self.GetSegmentsPAP()
        avgdists = []
        for segs in segsPAP:
            avgdist = []
            for seg in segs:
                ifunc = interpolate.interp1d(
                    seg.time, seg.CalcPositionRelative()[0, :], kind="linear"
                )
                avgdist.extend(
                    [
                        1000 * dd
                        for dd in ifunc(
                            np.arange(seg.time[0], seg.time[-1], seg.time_step)
                        )
                    ]
                )
            avgdists += [avgdist]
        return avgdists

    def GraphPAP_RunLengths(self, axs, **kwargs):
        """Histogram run lengths (< 2 um) per direction."""
        lens_pap = self.GetRunLengths()
        for idx, lens in enumerate(lens_pap):
            ld = [i for i in lens if i < 2]
            lens_pap[idx] = ld
        self.GraphPAP(lens_pap, axs, unit=r"$\mu$" + "m", **kwargs)

    def GraphPAP_Velocities(self, axs, **kwargs):
        """Histogram velocities per direction.

        NOTE(review): GetVelocities already returns nm/s, yet values here are
        filtered at 0.2 and rescaled by 1000 as if they were um/s -- confirm
        the intended units.  Behavior kept as-is.
        """
        vels_pap = self.GetVelocities()
        for idx, v in enumerate(vels_pap):
            ld = [i * 1000 for i in v if i < 0.2]
            vels_pap[idx] = ld
        self.GraphPAP(vels_pap, axs, unit="nm/s", **kwargs)

    def GraphPAP_Lifetimes(self, axs, **kwargs):
        """Histogram lifetimes (< 100 s) per direction."""
        lifes_pap = self.GetLifetimes()
        for idx, life in enumerate(lifes_pap):
            ld = [i for i in life if i < 100]
            lifes_pap[idx] = ld
        self.GraphPAP(lifes_pap, axs, unit="s", **kwargs)

    def GraphPAP_StartPosition(self, axs, **kwargs):
        """Histogram segment start positions per direction."""
        startPos = self.GetStartDistances()
        self.GraphPAP(startPos, axs, unit=r"$\mu$" + "m", **kwargs)

    def GraphPAP_EndPosition(self, axs, **kwargs):
        """Histogram segment end positions per direction."""
        endPos = self.GetEndDistances()
        self.GraphPAP(endPos, axs, unit=r"$\mu$" + "m", **kwargs)

    def GraphPAP(
        self, dat, axs, col="m", lab=None, unit="", xmax=None, xlab=None, ylab=None
    ):
        """Histogram each dataset in ``dat`` on the matching axis in ``axs``,
        annotated with mean +/- SEM and N.

        NOTE(review): when ``xmax`` is None it is fixed from the FIRST
        dataset and reused for the rest (shared x-scale); confirm intended.
        """
        for datt, ax in zip(dat, axs):
            if xmax is None:
                xmax = max(datt)
            # 16 equal-width bins spanning [0, xmax].
            nbins = 16
            bins = np.array([float(el) for el in range(nbins + 1)])
            bins = np.dot(np.array(xmax / float(nbins)), bins)
            aaa = ax.hist(datt, bins, edgecolor="k", color=col)
            if xlab is not None:
                ax.set_xlabel(xlab)
            if ylab is not None:
                ax.set_ylabel(ylab)
            ax.set_xlim([0, xmax])
            # Round the y-limit up to the nearest 10 and use three ticks.
            ymax = int(math.ceil(ax.get_ylim()[1] / 10) * 10)
            ax.set_yticks([0, ymax / 2, ymax])
            ax.set_ylim([0, ymax + 2])
            # Strain label in the corner.
            if lab is not None:
                ax.text(
                    0.95, 0.95, lab,
                    ha="right", va="top", transform=ax.transAxes,
                    fontsize=12, weight="roman",
                )
            # Mean marker plus mean +/- SEM annotation.
            ax.axvline(np.mean(datt), color="k", linestyle="dashed", linewidth=5)
            mu = np.mean(datt)
            form = "%.2f"
            mu_str = np.array2string(mu, formatter={"float_kind": lambda mu: form % mu})
            std = np.std(datt)
            std = std / np.sqrt(len(datt))
            std_str = np.array2string(
                std, formatter={"float_kind": lambda std: form % std}
            )
            ax.text(
                0.95, 0.85, r"{0} $\pm$ {1} {2}".format(mu_str, std_str, unit),
                ha="right", va="top", transform=ax.transAxes,
                fontsize=12, weight="roman",
            )
            ax.text(
                0.95, 0.75, r"N = {0}".format(len(datt)),
                ha="right", va="top", transform=ax.transAxes,
                fontsize=12, weight="roman",
            )

    def PlotTrackByStates(self, cols, k=5):
        """Plot up to ``k`` random tracks split by state on three stacked
        axes (poleward / antipoleward / inactive) and save the figure."""
        fig, axs = plt.subplots(3, 1, figsize=(10, 6), sharex=True)
        axsd = {"Poleward": axs[0], "Antipoleward": axs[1], "Inactive": axs[2]}
        k = min([k, len(self.tracks)])
        for idx, track in enumerate(random.sample(self.tracks, k)):
            minis, _ = track.SplitTrack()
            # Axis chosen by the direction of the track's first segment.
            ax = axsd[minis[0].direction]
            for trk in minis:
                pos_track_rel = trk.CalcPositionRelative()
                ax.plot(
                    trk.time - track.time[0],
                    np.absolute(pos_track_rel[0, :]),
                    linewidth=0.5, color=cols[trk.direction], alpha=0.4,
                )
        # Joint limits over the three subplots.
        xl = (0, 0)
        yl = (0, 0)
        for ax in axs:
            xli = ax.get_xlim()
            yli = ax.get_ylim()
            xl = (min([xli[0], xl[0]]), max([xli[1], xl[1]]))
            yl = (min([yli[0], yl[0]]), max([yli[1], yl[1]]))
        # Force x limit.
        xl = (xl[0], 400)
        # Legend entries (dummy lines so colors show without data).
        axs[0].plot([], [], label="Poleward", color=cols["Poleward"])
        axs[0].plot([], [], label="AntiPoleward", color=cols["Antipoleward"])
        axs[0].plot([], [], label="Inactive", color=cols["Inactive"])
        axs[0].legend(frameon=False)
        axs[2].set_xlabel("Time (s)")
        axs[1].set_ylabel(r"Distance from SPB ($\mu m$)")
        axs[0].set_ylim(bottom=-0.01, top=yl[1])
        axs[1].set_ylim(bottom=-0.01, top=yl[1])
        axs[2].set_ylim(bottom=-0.01, top=yl[1])
        axs[0].set_xlim(left=-1, right=xl[1])
        plt.tight_layout()
        fig.savefig("tracks_by_state_{0}.pdf".format(self.label))
        plt.close()

    # NOTE(review): this chunk ends mid-method; the tokens below are the
    # start of PlotAllTracks, preserved verbatim -- its body continues beyond
    # this chunk.
    def PlotAllTracks(self, cols):
        fig, ax
= plt.subplots(figsize=(4, 3)) # axsd = {'Poleward': axs[0], 'Antipoleward': axs[1], 'Inactive':axs[2]} for idx, track in enumerate(self.tracks): minis, _ = track.SplitTrack() for trk in minis: pos_track_rel = trk.CalcPositionRelative() ax.plot( trk.time - track.time[0], np.absolute(pos_track_rel[0, :]), linewidth=0.5, color=cols[trk.direction], alpha=0.6, ) # Set x and y limits of subplots # ymax=9 # xmax=1000 # ax.set_xlim(left=0.0,right=xmax) # ax.set_ylim(bottom=-0.1,top=ymax) ax.set_xlim(left=0.0) ax.set_ylim(bottom=-0.01) ymax = ax.get_ylim()[1] xmax = ax.get_xlim()[1] ax.set( xlabel="Time (s)", ylabel="Distance from SPB ($\mu m$)".format(len(self.tracks)), ) # Adding text inside a rectangular box by using the keyword 'bbox' plt.text(0.8 * xmax, 0.6 * ymax, "N = {0}".format(len(self.tracks)), fontsize=8) # Legend ax.plot([], [], label="Poleward", color=cols["Poleward"]) ax.plot([], [], label="Antipoleward", color=cols["Antipoleward"]) ax.plot([], [], label="Inactive", color=cols["Inactive"]) ax.legend() plt.tight_layout() plt.savefig("tracks_{0}.pdf".format(self.label)) # fig.subplots_adjust(hspace = -0.2) plt.close() if __name__ == "__main__": print("no default implementation")
/src/Track.py
#!/usr/bin/env python
"""Feature / Pole / Track classes for kymograph analysis.

A Feature stores time/position samples (resampled onto a uniform grid) for an
object tracked in a kymograph.  Pole adds a clamped interpolation function;
Track adds pole references, direction/line-type labels, trimming, intensity
measurement, and splitting of curved tracks into unidirectional segments.
"""
import os, pdb
import numpy as np
from scipy import interpolate, signal
from .node_graph import Graph
import matplotlib.pyplot as plt
import math
import uuid


# Superclass for Poles and tracks that stores positional and intensity information
class Feature:
    def __init__(self, time, position, image, time_step=1):
        self.time = np.array(time)
        self.position = np.array(position)
        self.id = uuid.uuid1()  # unique identifier for this feature
        self.time_step = time_step
        self.pixel_time = self.time / self.time_step  # time in pixel units
        self.image = image
        # Resample data onto a uniform time grid
        self.ResampleData()

    def ResampleData(self, sample_factor=3):
        """Resample position onto a uniform time grid (~sample_factor points/sec).

        The grid always has at least 2 points.
        """
        # Define an interpolation function for positions
        ifunc_pos = interpolate.interp1d(self.time, self.position, kind='linear')
        # Define a grid of resampled time points
        self.time = np.linspace(
            self.time[0], self.time[-1],
            int(np.floor(max([2, sample_factor * (self.time[-1] - self.time[0])]))))
        if len(self.time) == 1:
            # NOTE(review): leftover debugging hook; should be unreachable since
            # the grid above always has >= 2 points.
            pdb.set_trace()
            print('oops')
        self.position = ifunc_pos(self.time)


# Class for a Pole
class Pole(Feature):
    def __init__(self, time, position, image=None, time_step=1):
        # BUGFIX: original default was the mutable `image=[]`; use None sentinel
        # and substitute a fresh list to preserve the original behavior.
        if image is None:
            image = []
        Feature.__init__(self, time, position, image, time_step=time_step)
        # Interpolation function; outside the sampled time range the position is
        # clamped to the first/last sample instead of extrapolating.
        # self.ifunc = interpolate.interp1d(self.time, self.position, kind='linear', fill_value='extrapolate')
        self.ifunc = interpolate.interp1d(
            self.time, self.position, kind='linear',
            fill_value=(self.position[0], self.position[-1]), bounds_error=False)

    def Print(self):
        """Print a human-readable summary of this pole."""
        print('Pole :')
        print(' ID : {}'.format(self.id))
        print(' Time : {}'.format(self.time))
        print(' Position : {}'.format(self.position))
        print('--------------------------------- ')

    def TrimBasedOnTime(self, time_keep):
        """Trim the pole to lie inside the window [time_keep[0], time_keep[1]].

        Returns np.nan when trimming is disabled (time_keep == -1) or the pole
        lies entirely outside the window; None when fewer than 3 samples remain;
        otherwise a new trimmed Pole.
        """
        if np.all(time_keep == -1):
            return np.nan
        # Check if pole exists between those times
        start_before = (self.time[0] < time_keep[0])
        start_after = (self.time[0] > time_keep[1])
        end_before = (self.time[-1] < time_keep[0])
        end_after = (self.time[-1] > time_keep[1])
        if start_before and end_before:
            return np.nan
        elif start_after and end_after:
            return np.nan
        # Get indices of times inside the window
        idx = np.argwhere((self.time > time_keep[0]) & (self.time < time_keep[1])).T[0].tolist()
        if len(idx) < 3:
            return None
        idx = range(idx[0], idx[-1] + 1)
        # Create the new trimmed pole
        polenew = Pole(self.time[idx], self.position[idx], self.image,
                       time_step=self.time_step)
        # print(time_keep)
        # print(polenew.time[0])
        # print(polenew.time[-1])
        return polenew


# Class for a Track: additionally stores associated poles and track direction
class Track(Feature):
    def __init__(self, time, position, image, poles, direction, line_type,
                 time_step=1, pos_step=1, kymo_file=None):
        Feature.__init__(self, time, position, image, time_step=time_step)
        self.poles = poles
        self.direction = direction
        self.line_type = line_type
        self.pos_step = pos_step
        self.kymo_file = kymo_file

    def CalcPositionPoleCurrent(self):
        """Pole positions evaluated at this track's times via each pole's ifunc."""
        pos = np.zeros((len(self.poles), np.size(self.position)))
        for idx, pole in enumerate(self.poles):
            pos[idx, :] = np.array(pole.ifunc(self.time))
        return pos

    def CalcPositionRelative(self):
        """Absolute track position relative to each pole (n_poles x n_times)."""
        pole = self.CalcPositionPoleCurrent()
        pos = np.zeros(np.shape(pole))
        for idx, ele in enumerate(pole):
            pos[idx, :] = np.abs(np.array(self.position - ele))
        return pos

    def CalcVelocity(self):
        """Time-weighted mean absolute speed of the track relative to each pole."""
        pos = self.CalcPositionRelative()
        vel = np.zeros((len(self.poles)))
        for idx in range(len(self.poles)):
            vel[idx] = np.average(
                np.absolute(np.divide(np.diff(pos[idx, :]), np.diff(self.time))),
                weights=np.diff(self.time))
        return vel

    def CalcSpindleLength(self):
        """Pole-pole distance over this track's times; None unless bipolar."""
        if len(self.poles) != 2:
            return
        # Find the distance between the poles for the extent of this track
        leng = np.absolute(self.poles[0].ifunc(self.time) - self.poles[1].ifunc(self.time))
        return leng

    def CalcIntensity(self):
        """Mean image intensity along the track, via 2D interpolation of the kymograph."""
        dimT = np.shape(self.image)[0]
        dimX = np.shape(self.image)[1]
        f = interpolate.interp2d(self.pos_step * np.arange(0, dimX),
                                 self.time_step * np.arange(0, dimT), self.image)
        intense = f(self.position, self.time)
        return np.mean(intense)

    def CheckViability(self):
        """Return 1 if track time is strictly increasing, else 0."""
        if np.any(np.diff(self.time) <= 0):
            return 0
        return 1

    def OrderPoles(self):
        """Order poles so the first is closest to the start of the track."""
        if len(self.poles) != 2:
            return
        pos = self.CalcPositionRelative()
        if np.absolute(pos[1, 0]) < np.absolute(pos[0, 0]):
            self.poles = [self.poles[1], self.poles[0]]

    def Trim(self, lrange):
        """Trim the track to times where spindle length is inside lrange.

        Returns self when monopolar or lrange is None; None when fewer than 3
        samples remain; otherwise a new trimmed Track.
        """
        if len(self.poles) == 1:
            return self
        if lrange is None:
            return self
        # Get indices of times when spindle length is between the given range values
        lens = self.CalcSpindleLength()
        idx = np.argwhere((lens > lrange[0]) & (lens < lrange[1])).T[0].tolist()
        if len(idx) < 3:
            return None
        idx = range(idx[0], idx[-1] + 1)
        # Create the new trimmed track
        tracknew = Track(self.time[idx], self.position[idx], self.image, self.poles,
                         self.direction, self.line_type,
                         time_step=self.time_step, pos_step=self.pos_step)
        return tracknew

    def TrimBasedOnTime(self, time_keep):
        """Trim the track to the time window; same return convention as Pole.TrimBasedOnTime."""
        if np.all(time_keep == -1):
            return np.nan
        # Check if track exists between those times
        start_before = (self.time[0] < time_keep[0])
        start_after = (self.time[0] > time_keep[1])
        end_before = (self.time[-1] < time_keep[0])
        end_after = (self.time[-1] > time_keep[1])
        if start_before and end_before:
            return np.nan
        elif start_after and end_after:
            return np.nan
        # Get indices of times
        idx = np.argwhere((self.time > time_keep[0]) & (self.time < time_keep[1])).T[0].tolist()
        if len(idx) < 3:
            return None
        idx = range(idx[0], idx[-1] + 1)
        # Create the new trimmed track
        tracknew = Track(self.time[idx], self.position[idx], self.image, self.poles,
                         self.direction, self.line_type,
                         time_step=self.time_step, pos_step=self.pos_step)
        if tracknew is None:
            # NOTE(review): unreachable — the constructor never returns None.
            pdb.set_trace()
            print('b')
        return tracknew

    def SplitTrack(self, ipole=0, cutoff=0.003):
        """Split a curved track into unidirectional mini segments.

        cutoff : speed threshold in micron/sec below which a segment is Inactive.
        Returns (segments, switches): the list of Track segments and a nested
        dict counting state transitions P/AP/I -> P/AP/I.
        """
        switches = {
            'P': {'P': 0, 'AP': 0, 'I': 0, },
            'AP': {'P': 0, 'AP': 0, 'I': 0, },
            'I': {'P': 0, 'AP': 0, 'I': 0, },
        }
        # If linear directional track, cant split, so exit
        if self.direction != 'Ambiguous':
            return [self], switches
        # If linear ambiguous track, figure out direction, then exit
        if self.line_type == 'Line' and self.direction == 'Ambiguous':
            if len(self.CalcPositionRelative()) == 0:
                pdb.set_trace()
                print('a')
            position = np.absolute(self.CalcPositionRelative()[ipole, :])
            vel = np.mean(np.divide(np.diff(position), np.diff(self.time)))
            if abs(vel) < cutoff:
                self.direction = 'Inactive'
            elif vel > 0:
                self.direction = 'Antipoleward'
            elif vel < 0:
                self.direction = 'Poleward'
            return [self], switches
        # Get track position relative to the pole
        position = np.absolute(self.CalcPositionRelative()[ipole, :])
        # Use a rolling window to find velocities
        vel = FindGradientRollingWindow(position, self.time, window=16)
        # Assign states based on value of velocity at each timestep
        states = []
        for v in vel:
            if abs(v) < cutoff:
                states += ['I']
            elif v > 0:
                states += ['AP']
            elif v < 0:
                states += ['P']
        # set first state to second state. last state to second last state
        states[0] = states[1]
        states[-1] = states[-2]
        # Remove singly occuring states
        for i, state in enumerate(states):
            if i > 0 and i < len(states) - 1:
                if state != states[i - 1] and state != states[i + 1]:
                    states[i] = states[i - 1]
        # Count switches and get track indices
        p_state = 'XXX'
        track = {'pos': [], 'time': [], 'dir': []}
        idx = [0, 0]
        for cnt, st in enumerate(states):
            if cnt == 0:
                p_state = st
                idx[0] = 0
                continue
            if st == p_state:
                idx[1] += 1
            if st != p_state:
                # store old segment (with one sample of overlap into the next)
                pos = self.position[idx[0]: idx[1] + 2]
                time = self.time[idx[0]: idx[1] + 2]
                track['pos'] += [pos]
                track['time'] += [time]
                track['dir'] += [p_state]
                p_state = st
                # begin new segment
                idx[0] = cnt
                idx[1] = cnt
            # Store the last segment when the end of the state list is reached
            if cnt == len(states) - 1:
                pos = self.position[idx[0]: idx[1] + 1]
                time = self.time[idx[0]: idx[1] + 1]
                track['pos'] += [pos]
                track['time'] += [time]
                track['dir'] += [p_state]
        # record switches
        for cnt, dd in enumerate(track['dir']):
            if cnt == 0:
                continue
            switches[track['dir'][cnt - 1]][track['dir'][cnt]] += 1
        # Create track objects from the information
        segments = []
        for time, pos, direc in zip(track['time'], track['pos'], track['dir']):
            # BUGFIX: compare strings with '==' rather than identity ('is'),
            # which is implementation-dependent and a SyntaxWarning in py3.8+.
            if direc == 'P':
                direction = 'Poleward'
            elif direc == 'AP':
                direction = 'Antipoleward'
            elif direc == 'I':
                direction = 'Inactive'
            pos = pos.tolist()
            time = time.tolist()
            segments += [Track(time, pos, self.image, self.poles, direction, 'Line',
                               time_step=self.time_step, pos_step=self.pos_step,
                               kymo_file=self.kymo_file)]
        return segments, switches

    def DisplayTrack(self, ax=None):
        """Overlay this track (in pixel coordinates) on its kymograph image."""
        if ax is None:
            fig, ax = plt.subplots(figsize=(6, 6))
        # Display kymograph image
        ax.imshow(self.image)
        ax.plot(self.position / self.pos_step, self.time / self.time_step, color='red')

    def Print(self):
        """Print a human-readable summary of this track."""
        print('Feature :')
        print(' ID : {}'.format(self.id))
        print(' Direction : {}'.format(self.direction))
        print(' Line type : {}'.format(self.line_type))
        print(' Time : {}'.format(self.time))
        print(' Position : {}'.format(self.position))
        print('--------------------------------- ')


def CountSwitches(states, switches):
    # Given a list of
    # NOTE(review): this function body appears truncated/corrupted in the
    # source — it references names never defined here (t, t_window, x) and
    # duplicates FindGradientRollingWindow below. Calling it raises NameError.
    # Nothing in this file calls it; body preserved verbatim pending repair.
    dt = np.mean(np.diff(t))
    nHalfWindow = int(np.ceil(t_window / (2 * dt)))
    for i in range(len(t)):
        # get upper lower indices of window
        i_lb = max([0, i - nHalfWindow])
        i_ub = min([len(t), i + nHalfWindow])
        # Find gradient
        diff = lambda xx: np.diff(xx[i_lb:i_ub])
        grad = np.mean(np.divide(diff(x), diff(t)))
    return grad


def FindGradientRollingWindow(x, t, window=6):
    """Gradient dx/dt estimated in a rolling window of `window` time units."""
    dt = np.mean(np.diff(t))
    nHalfWindow = int(np.ceil(window / (2 * dt)))
    grads = []
    for i in range(len(t)):
        # get upper lower indices of window
        i_lb = max([0, i - nHalfWindow])
        i_ub = min([len(t), i + nHalfWindow])
        # Find gradient
        diff = lambda xx: np.diff(xx[i_lb:i_ub])
        grads += [np.mean(np.divide(diff(x), diff(t)))]
    return grads


if __name__ == "__main__":
    print('Not implemented')
/src/breakBipolar.py
#!/usr/bin/env python
import os, pdb
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from .Kymograph import *
import shutil
from random import sample
import seaborn as sns
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
import pickle

'''
Name: breakBipolar.py
Description: Plots the pole separation of a bipolar file
'''

parent_path = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/data/temp')

# Strain folders
folds = ['wild type']
# folds = ['cut7-989TD,pkl1D,klp2D']
# folds = ['wild type','cut7-989TD,pkl1D,klp2D']
# savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/result_wt')
# savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/result_mutant')
savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/blahblah')

# slope_window = 25
# nsamples = 10
# thresh = [0.008, 0.005]


# get_data_from_files {{{
def get_data_from_files(parent_path, folds):
    """Build a long-form DataFrame of spindle length/velocity/acceleration.

    One row per resampled time point, for every bipolar kymograph txt file
    under parent_path/<strain> for each strain in folds.
    """
    # lists
    times = []
    length = []
    velocity = []
    acceleration = []
    strain_tag = []
    file_tag = []
    # folds is list containing different strain names
    for jj, jfold in enumerate(folds):
        # txt files
        mainpath = parent_path / jfold
        files2break = mainpath.glob('*txt')
        for jfile, fil in enumerate(files2break):
            kymo = Kymograph(fname=str(fil))
            # Only do stuff if its bipolar
            if len(kymo.poles) == 2:
                # Times: merge both poles' times, subsample, then resample to ~1 s grid
                time = np.array(sorted(np.hstack((kymo.poles[0].time, kymo.poles[1].time)))[1::10])
                time = np.linspace(time[0], time[-1], int(np.ceil(time[-1] - time[0])))
                # Calculate spindle length, velocity, acceleration
                clen = np.absolute(kymo.poles[1].ifunc(time) - kymo.poles[0].ifunc(time))
                cvel = list((clen[1:] - clen[:-1]) / (time[1:] - time[:-1]))
                cvel.insert(0, cvel[0])  # pad so arrays stay the same length
                cvel = np.array(cvel)
                cacc = list((cvel[1:] - cvel[:-1]) / (time[1:] - time[:-1]))
                cacc.insert(0, cacc[0])
                cacc = np.array(cacc)
                for jt in range(len(time)):
                    times.append(time[jt])
                    length.append(clen[jt])
                    velocity.append(cvel[jt])
                    acceleration.append(cacc[jt])
                    strain_tag.append(jfold)
                    file_tag.append(os.path.basename(kymo.label))

    df = pd.DataFrame({'strain': strain_tag,
                       'index': file_tag,
                       'time': times,
                       'length': length,
                       'velocity': velocity,
                       'acceleration': acceleration, })
    return df
# }}}


# pair_plot {{{
def pair_plot(datframe, vars_compare, label=None, savePath=None, title=''):
    """Seaborn pairplot of vars_compare, optionally hue-colored by `label`."""
    # NOTE(review): sns.pairplot creates its own figure; this subplots() figure
    # is never used and leaks. Kept for behavioral parity — candidate removal.
    fig, ax = plt.subplots(figsize=(12, 9))
    if label is None:
        sns.pairplot(datframe, vars=vars_compare,
                     plot_kws=dict(marker="+", s=50, linewidth=3, alpha=0.1),
                     diag_kind='kde', palette='Dark2', height=3)
    else:
        sns.pairplot(datframe, vars=vars_compare, hue=label,
                     plot_kws=dict(marker="+", s=50, linewidth=3, alpha=0.1),
                     diag_kind='kde', palette='Dark2', height=3)
    plt.tight_layout()
    plt.title(title)
    if savePath is not None:
        plt.savefig(savePath)
    plt.close()
# }}}


# Kmeans {{{
def do_KMeans(df, vars_compare, n_clusters=2, display=True, savePath=None):
    """Cluster rows of df[vars_compare] with k-means; returns (labeled df, model)."""
    data = df[vars_compare].to_numpy()
    # scaler = StandardScaler()
    # X_std = scaler.fit_transform(data)
    X_std = data
    print('KMeans clustering: N_clusters = {}'.format(n_clusters))
    kmeans = KMeans(n_clusters=n_clusters, init='k-means++',
                    max_iter=300, n_init=10, random_state=10)
    model = kmeans.fit(X_std)
    labels = model.predict(X_std)
    sil_score = silhouette_score(X_std, labels)
    print('Silhouette Score = {0:.3f}'.format(sil_score))
    df['label'] = labels
    df = labels_ordered(df, 'length')
    if display and savePath is not None:
        pair_plot(df, vars_compare, label="label", title='kmeans', savePath=savePath)
    return df, model
# }}}


# GMM {{{
def do_GaussianMixtureModel(df, vars_compare, n_clusters=2, display=True, savePath=None):
    """Cluster standardized df[vars_compare] with a GMM; returns (labeled df, model)."""
    data = df[vars_compare].to_numpy()
    scaler = StandardScaler()
    X_std = scaler.fit_transform(data)
    # define the model
    print('Gaussian Mixture Model: N_components = {}'.format(n_clusters))
    model = GaussianMixture(n_components=n_clusters).fit(X_std)
    labels = model.predict(X_std)
    sil_score = silhouette_score(X_std, labels)
    print('Silhouette Score = {0:.3f}'.format(sil_score))
    df['label'] = labels
    df = labels_ordered(df, 'length')
    if display and savePath is not None:
        # BUGFIX: plot title previously said 'kmeans' (copy-paste from do_KMeans).
        pair_plot(df, vars_compare, label="label", title='gmm', savePath=savePath)
    return df, model
# }}}


# LabelsOrdered {{{
def labels_ordered(df, ref_name):
    """Remap cluster labels so label order follows increasing mean of df[ref_name]."""
    label_list_new = []
    # Get unique labels and the mean values of the reference variable
    labels = sorted(df.label.unique())
    mu = np.zeros(len(labels))
    for jlab in range(len(labels)):
        mu[jlab] = df[df.label == labels[jlab]][ref_name].mean()
    # Create mapping from old_label to new
    labels_new = [x for _, x in sorted(list(zip(mu, labels)), key=lambda x: x[0])]
    mapping = {k: v for k, v in zip(labels, labels_new)}
    print(mapping)
    for jlab in df.label:
        label_list_new.append(mapping[jlab])
    df.label = label_list_new
    return df
# }}}


# plotClassifiedTracks {{{
def plotClassifiedTracks(df, saveParent=None, nSamples=50, model=None):
    """Per strain: plot raw, classified, and smoothed/one-way-classified tracks."""
    # for each unique strain, make a plot
    strains = df.strain.unique().tolist()
    for strain in strains:
        fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(18, 4.5), sharey=True)
        indices = df[df.strain == strain]['index'].unique()
        # Pick the first nSamples indices
        # indices2plot = sample(list(indices), nSamples)
        indices2plot = indices[:nSamples]
        # plot each track
        for ind in indices2plot:
            # get track to plot
            track = df[(df['strain'] == strain) & (df['index'] == ind)]
            time = np.array(track.time)
            length = np.array(track.length)
            label = np.array(track.label)
            # Plot Axis 0: raw length trace
            ax0.plot(time, length, alpha=0.5, color='k', lw=2)
            # Plot Axis 1: length split by raw cluster label
            len_0 = length.copy()
            len_1 = length.copy()
            idx0 = np.where(label == 0)[0]
            idx1 = np.where(label == 1)[0]
            len_0[idx1] = np.nan
            len_1[idx0] = np.nan
            ax1.plot(time, len_0, alpha=0.5, lw=2, color='green')
            ax1.plot(time, len_1, alpha=0.5, lw=2, color='purple')
            # Plot Axis 2: smoothed labels forced monotone (once 1, always 1)
            label_new = np.array(ForceLabelsOneWay(SmoothClassifiedLabels(label, span=100)))
            len_0 = length.copy()
            len_1 = length.copy()
            idx0 = np.where(label_new == 0)[0]
            idx1 = np.where(label_new == 1)[0]
            len_0[idx1] = np.nan
            len_1[idx0] = np.nan
            ax2.plot(time, len_0, alpha=0.5, lw=2, color='green')
            ax2.plot(time, len_1, alpha=0.5, lw=2, color='purple')
        # Labels/Legend Axis 0
        ax0.set(ylabel=r'Spindle Length $(\mu m)$', xlabel='Time (s)')
        # Labels/Legend Axis 1
        ax1.plot([], [], alpha=0.7, color='green', label='Group 0')
        ax1.plot([], [], alpha=0.7, color='purple', label='Group 1')
        ax1.legend()
        ax1.set(xlabel='Time (s)')
        # Labels/Legend Axis 2
        ax2.plot([], [], alpha=0.7, color='green', label='Group 0')
        ax2.plot([], [], alpha=0.7, color='purple', label='Group 1')
        # BUGFIX: was ax1.legend() (copy-paste), leaving axis 2 without a legend.
        ax2.legend()
        ax2.set(xlabel='Time (s)')
        plt.suptitle(strain)
        plt.tight_layout()
        if saveParent is not None:
            if model is None:
                plt.savefig(saveParent / 'tracks_{0}.pdf'.format(strain))
            else:
                plt.savefig(saveParent / 'tracks_{0}_{1}.pdf'.format(model, strain))
        plt.close()
# }}}


# SmoothClassifiedLabels {{{
def SmoothClassifiedLabels(label, span=100):
    """Moving-average-smooth a 0/1 label trace, then rethreshold at 0.5."""
    # smooth_data {{{
    def smooth_data(arr, span):
        re = np.convolve(arr, np.ones(span * 2 + 1) / (span * 2 + 1), mode="same")
        # The "my_average" part: shrinks the averaging window on the side that
        # reaches beyond the data, keeps the other side the same size as given
        # by "span"
        re[0] = np.average(arr[:span])
        for i in range(1, span + 1):
            re[i] = np.average(arr[:i + span])
            re[-i] = np.average(arr[-i - span:])
        return re
    # }}}
    # Smoothed Labels (span capped at half the trace length)
    label_new = np.where(
        np.array(smooth_data(label, min([span, int(len(label) / 2)]))) >= 0.5, 1, 0)
    # Once 1, always 1
    # label_perm = [max(label_new[:1+jj]) for jj in range(len(label_new))]
    return label_new
# }}}


# ForceLabelsOneWay {{{
def ForceLabelsOneWay(label):
    """Make the label trace monotone non-decreasing (once 1, always 1)."""
    labels = [np.max(label[:1 + idx]) for idx in range(len(label))]
    return np.array(labels)
# }}}


if not Path.exists(savepath):
    os.mkdir(savepath)

# Load data into dataframe
df = get_data_from_files(parent_path, folds)
names = ['velocity']

# Display (pre clustering)
pair_plot(df, names, savePath=savepath / 'features_grid_raw.png')

# Kmeans
df_kmean, model_kmean = do_KMeans(df.copy(), names,
                                  savePath=savepath / 'features_grid_kmeans.png')
print(df_kmean.groupby('label').mean())
plotClassifiedTracks(df_kmean, model='kmeans', saveParent=savepath)

# Save model
with open(parent_path / 'kmeans.pickle', 'wb') as f:
    pickle.dump(model_kmean, f)

# GMM
# df_gmm, model_gmm = do_GaussianMixtureModel(df.copy(),names, savePath=savepath/'features_grid_gmm.png')
# print(df_gmm.groupby('label').mean() )
# plotClassifiedTracks(df_gmm, model='gmm',saveParent=savepath)

# if vel_thresh[0]==1:
#     anaphase_time = 'Always'
# elif vel_thresh[0]==0 and vel_thresh[-1]==1:
#     anaphase_time = timelist[ np.where(np.array(vel_thresh)>0.5)[0][0] ]
# elif vel_thresh[-1]==0:
#     anaphase_time = 'Never'
# # anaphase_time = timelist[ np.where(np.array(vel_thresh)>0.5)[0][0] ]
# print( '{0} --> Anaphase B Transition = {1} sec'.format( files2break[idx].stem,anaphase_time))
/src/node_graph.py
#!/usr/bin/env python
# Python program to print connected
# components in an undirected graph
# This code is contributed by Abhishek Valsan
# Updated by Saad Ansari for a directed graph application
import pdb


class Graph:
    """Directed graph on V vertices (0..V-1) with weakly-connected-component search.

    Components are collected by a forward DFS (appending to the tail of the
    component list) followed by a backward DFS (inserting at the head), so each
    component comes out ordered roughly upstream-to-downstream.
    """

    def __init__(self, V):
        self.V = V
        self.next = [[] for i in range(V)]  # outgoing adjacency lists
        self.prev = [[] for i in range(V)]  # incoming adjacency lists

    def VisitNext(self, temp, v, visited):
        """DFS along outgoing edges from v, appending newly seen vertices to temp."""
        visited[v] = True
        # BUGFIX: was a bare try/except around temp[-1] that swallowed any
        # exception; the only intended case is an empty list, tested explicitly.
        if not temp or temp[-1] != v:
            temp.append(v)
        for i in self.next[v]:
            if not visited[i]:
                temp = self.VisitNext(temp, i, visited)
        return temp

    def VisitPrev(self, temp, v, visited):
        """DFS along incoming edges from v, prepending newly seen vertices to temp."""
        visited[v] = True
        # BUGFIX: same bare-except cleanup as VisitNext.
        if not temp or temp[0] != v:
            temp.insert(0, v)
        for i in self.prev[v]:
            if not visited[i]:
                temp = self.VisitPrev(temp, i, visited)
        return temp

    def addEdge(self, v, w):
        """Add the directed edge v -> w."""
        self.next[v].append(w)
        self.prev[w].append(v)

    def connectedComponents(self):
        """Return the weakly connected components as a list of vertex lists."""
        visited = [False] * self.V
        cc = []
        for v in range(self.V):
            if not visited[v]:
                temp = self.VisitNext([], v, visited)
                cc.append(self.VisitPrev(temp, v, visited))
        return cc


# Driver Code
if __name__ == "__main__":
    # Example graph with 7 vertices numbered 0 to 6
    g = Graph(7)
    g.addEdge(1, 0)
    g.addEdge(3, 4)
    g.addEdge(0, 6)
    g.addEdge(5, 1)
    cc = g.connectedComponents()
    print("Following are connected components")
    print(cc)
/src/smooth_test.py
#!/usr/bin/env python
import os, pdb
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from .Kymograph import *
import shutil

'''
Name: smooth_test.py
Description: Compares moving-average smoothing windows applied to the pole
separation of bipolar kymograph files, saving one comparison plot per file.
'''
# NOTE(review): the docstring previously said "breakBipolar.py" — a copy-paste
# from the sibling script; corrected to this file's actual purpose.

folds = ['wild type']
savepath = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/Analysis/result_smoothing')
parent_path = Path('/Users/saadjansari/Documents/Projects/ImageAnalysis/KymoAnalysis/data/temp')


def smooth_data(arr, span):
    """Moving-average smoothing with edge windows shrunk to the available data."""
    re = np.convolve(arr, np.ones(span * 2 + 1) / (span * 2 + 1), mode="same")
    # The "my_average" part: shrinks the averaging window on the side that
    # reaches beyond the data, keeps the other side the same size as given
    # by "span"
    re[0] = np.average(arr[:span])
    for i in range(1, span + 1):
        re[i] = np.average(arr[:i + span])
        re[-i] = np.average(arr[-i - span:])
    return re


if not Path.exists(savepath):
    os.mkdir(savepath)

# BUGFIX: the outer loop variable was also named `jj`, shadowed by the inner
# file loop below; renamed for clarity (inner `jj` is the one used in filenames).
for jfold_idx, jfold in enumerate(folds):
    print('Data: {0}'.format(jfold))
    mainpath = parent_path / jfold
    # txt files
    files2break = mainpath.glob('*txt')
    # Pole separation vs time
    print('Calculating pole separations...')
    for jj, fil in enumerate(files2break):
        # print(fil)
        kymo = Kymograph(fname=str(fil))
        if len(kymo.poles) == 2:
            fig, ax = plt.subplots()
            time = np.array(sorted(np.hstack((kymo.poles[0].time, kymo.poles[1].time)))[1::10])
            time = np.linspace(time[0], time[-1], int(np.ceil(time[-1] - time[0])))
            spindleLength = np.array(np.absolute(kymo.poles[1].ifunc(time) - kymo.poles[0].ifunc(time)))
            slope_windows = [5, 25, 50]
            for slope in slope_windows:
                spindleLength_cnv = smooth_data(spindleLength, slope)
                ax.plot(time, spindleLength_cnv, label='Window = {0}'.format(slope))
            ax.plot(time, spindleLength, 'k:', lw=2, label='Original')
            ax.legend()
            ax.set(xlabel='Time (s)', ylabel=r'Pole separation ($\mu m$)')
            plt.tight_layout()
            plt.savefig(savepath / 'smoothing_{0}_{1}.pdf'.format(mainpath.stem, jj))
            plt.close(fig)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
Edinburgh-Genome-Foundry/Geneblocks
refs/heads/master
{"/geneblocks/DiffBlocks/DiffBlocks.py": ["/geneblocks/DiffBlocks/DiffBlock.py", "/geneblocks/Location.py", "/geneblocks/DiffBlocks/DiffRecordTranslator.py", "/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py", "/geneblocks/DiffBlocks/diffblocks_tools.py"], "/geneblocks/DiffBlocks/__init__.py": ["/geneblocks/DiffBlocks/DiffBlock.py", "/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/DiffBlocks/DiffRecordTranslator.py"], "/geneblocks/DiffBlocks/diffblocks_tools.py": ["/geneblocks/DiffBlocks/DiffBlock.py", "/geneblocks/Location.py"], "/tests/test_base_diff_cases.py": ["/geneblocks/DiffBlocks/DiffBlock.py", "/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/biotools.py"], "/tests/test_scenarios.py": ["/geneblocks/DiffBlocks/DiffBlock.py", "/geneblocks/Location.py", "/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py"], "/geneblocks/DiffBlocks/DiffBlock.py": ["/geneblocks/Location.py", "/geneblocks/biotools.py"], "/geneblocks/CommonBlocks/CommonBlocks.py": ["/geneblocks/CommonBlocks/commonblocks_tools.py", "/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocksRecordTranslator.py"], "/examples/complex_sequences.py": ["/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py"], "/examples/diff_blocks.py": ["/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/biotools.py"], "/geneblocks/__init__.py": ["/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/DiffBlocks/DiffRecordTranslator.py", "/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py", "/geneblocks/utils.py"], "/tests/test_complex_sequences.py": ["/geneblocks/DiffBlocks/DiffBlocks.py", "/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py"], "/geneblocks/CommonBlocks/commonblocks_tools.py": ["/geneblocks/biotools.py"], "/geneblocks/utils.py": ["/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py"], 
"/examples/features_transfer.py": ["/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py"], "/examples/common_blocks.py": ["/geneblocks/biotools.py", "/geneblocks/CommonBlocks/CommonBlocks.py"], "/geneblocks/Location.py": ["/geneblocks/biotools.py"], "/geneblocks/sequence_modification_utils.py": ["/geneblocks/biotools.py"], "/geneblocks/CommonBlocks/__init__.py": ["/geneblocks/CommonBlocks/CommonBlocks.py"], "/tests/test_utils.py": ["/geneblocks/utils.py"]}
└── ├── examples │ ├── common_blocks.py │ ├── complex_sequences.py │ ├── diff_blocks.py │ └── features_transfer.py ├── geneblocks │ ├── CommonBlocks │ │ ├── CommonBlocks.py │ │ ├── CommonBlocksRecordTranslator.py │ │ ├── __init__.py │ │ └── commonblocks_tools.py │ ├── DiffBlocks │ │ ├── DiffBlock.py │ │ ├── DiffBlocks.py │ │ ├── DiffRecordTranslator.py │ │ ├── __init__.py │ │ └── diffblocks_tools.py │ ├── Location.py │ ├── __init__.py │ ├── biotools.py │ ├── sequence_modification_utils.py │ └── utils.py ├── setup.py └── tests ├── test_base_diff_cases.py ├── test_complex_sequences.py ├── test_scenarios.py └── test_utils.py
/examples/common_blocks.py
from geneblocks import CommonBlocks
from geneblocks.biotools import reverse_complement, random_dna_sequence

# Eight random DNA fragments of increasing length: 100, 200, ..., 800 bp.
C1, A, B, C2, C3, D, E, F = [random_dna_sequence(100 * size) for size in range(1, 9)]

# Five sequences that share the C1/C2/C3 blocks in different arrangements,
# including one reverse-complemented occurrence in "e".
sequences = {
    "a": C1 + A + C2,
    "b": B + D + C2 + C3,
    "c": E + C1 + C2,
    "d": C2 + C1 + F + C3,
    "e": C3 + reverse_complement(C2 + C1),
}

common_blocks = CommonBlocks.from_sequences(sequences)
axes = common_blocks.plot_common_blocks()
axes[0].figure.savefig("common_blocks.png", bbox_inches="tight")
/examples/complex_sequences.py
from geneblocks import DiffBlocks, CommonBlocks, random_dna_sequence
import geneblocks.sequence_modification_utils as smu
import matplotlib.pyplot as plt
import numpy

numpy.random.seed(1)  # ensures the sequences will be the same at each run

# GENERATE 2 "SISTER" SEQUENCES FOR THE EXAMPLE
seq1 = random_dna_sequence(50000)
seq1 = smu.copy(seq1, 25000, 30000, 50000)
seq2 = seq1
seq2 = smu.insert(seq2, 39000, random_dna_sequence(100))
seq2 = smu.insert(seq2, 38000, random_dna_sequence(100))
seq2 = smu.reverse(seq2, 30000, 35000)
# Fixed: the second segment's end was written "480000", which lies far
# beyond the ~55 kb sequence; 48000 keeps the swapped segment inside the
# sequence as intended.
seq2 = smu.swap(seq2, (30000, 35000), (45000, 48000))
seq2 = smu.delete(seq2, 20000, 2000)
seq2 = smu.insert(seq2, 10000, random_dna_sequence(2000))
seq2 = smu.insert(seq2, 0, 1000 * "A")

# FIND COMMON BLOCKS AND DIFFS
common_blocks = CommonBlocks.from_sequences({'seq1': seq1, 'seq2': seq2})
diff_blocks = DiffBlocks.from_sequences(seq1, seq2).merged()

# PLOT EVERYTHING
fig, axes = plt.subplots(3, 1, figsize=(16, 8))
common_blocks.plot_common_blocks(axes=axes[:-1])
diff_blocks.plot(ax=axes[-1], separate_axes=False)
axes[-1].set_xlabel("Changes in seq2 vs. seq1")
fig.savefig("complex_sequences.png", bbox_inches='tight')
/examples/diff_blocks.py
import os
from geneblocks import DiffBlocks, load_record

# Load the two GenBank records to compare.
record_1 = load_record(os.path.join("sequences", "sequence1.gb"))
record_2 = load_record(os.path.join("sequences", "sequence2.gb"))

# Compute the diff blocks and plot them on two stacked axes.
blocks = DiffBlocks.from_sequences(record_1, record_2)
ax_top, ax_bottom = blocks.plot(figure_width=8)
ax_top.figure.savefig("diff_blocks.png", bbox_inches='tight')
/examples/features_transfer.py
import os
from geneblocks import CommonBlocks, load_record
from dna_features_viewer import BiopythonTranslator
import matplotlib.pyplot as plt

# LOAD THE TWO RECORDS
part = load_record(
    os.path.join("sequences", "features_transfer", "part.gb"), name="part"
)
plasmid = load_record(
    os.path.join("sequences", "features_transfer", "plasmid_to_annotate.gb"),
    name="plasmid",
)

# TRANSFER THE FEATURES
blocks = CommonBlocks.from_sequences([part, plasmid])
new_records = blocks.copy_features_between_common_blocks(inplace=False)
annotated_plasmid = new_records["plasmid"]  # record with all features

# PLOT ALL RECORDS
fig, (ax0, ax1, ax2) = plt.subplots(3, 1, sharex=True, figsize=(7, 5))
ax0.set_title("Record 1: Annotated part", loc="left")
ax1.set_title("Record 2: Partially annotated plasmid", loc="left")
ax2.set_title("Record 2 (after) with all annotations", loc="left")


class SpecialBiopythonTranslator(BiopythonTranslator):
    """Color features red when they came from the part record, blue otherwise."""

    def compute_feature_color(self, f):
        source = "".join(f.qualifiers.get("original_record", ""))
        return "#ffafaf" if source == "part" else "#afafff"


tr = SpecialBiopythonTranslator()
tr.translate_record(part).plot(ax0, with_ruler=False, x_lim=(0, len(plasmid)))
tr.translate_record(plasmid).plot(ax1, with_ruler=False)
tr.translate_record(annotated_plasmid).plot(ax2, with_ruler=False)
fig.tight_layout()
fig.subplots_adjust(hspace=0.6)
fig.savefig("features_transfer.png", dpi=125)
/geneblocks/CommonBlocks/CommonBlocks.py
"""Defines central class BlockFinder.""" import itertools from collections import OrderedDict from copy import deepcopy from .CommonBlocksRecordTranslator import CommonBlocksRecordTranslator import matplotlib.pyplot as plt import matplotlib.cm as cm from ..biotools import annotate_record from .commonblocks_tools import ( format_sequences_as_dicts, select_common_blocks, find_homologies_between_sequences, ) # TODO: Simplify the code by using the new Location class in Location.py class CommonBlocks: """Class to represent a set of common blocks from different sequences. Create with ``CommonBlocks.from_sequences``: >>> common_blocks = CommonBlocks.from_sequences({'s1': 'ATGC...'}) Parameters ---------- common_blocks A dictionary of the sequences to compare, of the form {sequence_name: ATGC_sequence_string} or a list of records, all with different IDs. records A dictionary of the Biopython records of the sequences {record_id: record}. """ def __init__(self, common_blocks, records): """Initialize, compute best blocks.""" self.common_blocks = common_blocks self.records = records @staticmethod def from_sequences( sequences, block_selection_method="most_coverage_first", include_self_homologies=True, min_block_size=80, max_block_size=None, ): sequences_dict, records_dict = format_sequences_as_dicts(sequences) homologies_dict = find_homologies_between_sequences( sequences_dict, min_size=min_block_size, max_size=max_block_size, include_self_homologies=include_self_homologies, ) common_blocks = select_common_blocks( homologies_dict, sequences_dict, min_size=min_block_size, method=block_selection_method, ) return CommonBlocks(common_blocks=common_blocks, records=records_dict) def compute_unique_blocks(self): """Return a dictionary listing unique blocks by sequence. The unique blocks are the blocks between the selected common blocks. 
The result is of the form {seq: [(start, end), (start2, end2), ...]} """ unique_blocks = OrderedDict() for seqname, rec in self.sequences_with_annotated_blocks().items(): blocks_locations = ( [(0, 0)] + sorted( [ (f.location.start, f.location.end) for f in rec.features if f.qualifiers.get("is_block", False) ] ) + [(len(rec), len(rec))] ) unique_blocks[seqname] = [ (end1, start2) for (_, end1), (start2, _) in zip( blocks_locations, blocks_locations[1:] ) if (start2 - end1) > 1 ] return unique_blocks def common_blocks_to_csv(self, target_file=None): """Write the common blocks into a CSV file. If a target CSV file is provided the result is written to that file. Otherwise the result is returned as a string. The columns of the CSV file are "block", "size", "locations", and sequence. """ csv_content = "\n".join( ["block;size;locations;sequence"] + [ ";".join( [ block_name, str(len(data["sequence"])), " ".join( [ "%s(%d, %d, %d)" % (cst, start, end, strand) for (cst, (start, end, strand)) in data["locations"] ] ), data["sequence"], ] ) for block_name, data in self.common_blocks.items() ] ) if target_file: with open(target_file, "w+") as f: f.write(csv_content) else: return csv_content def common_blocks_records(self): """Return all common blocks as a list of Biopython records. 
""" if self.records is None: raise ValueError("") records = [] for block_name, data in self.common_blocks.items(): cst, (start, end, strand) = data["locations"][0] record = self.records[cst][start:end] if strand == -1: record = record.reverse_complement() record.id = record.name = block_name records.append(record) return records def unique_blocks_records(self, target_file=None): """Return all unique blocks as a list of Biopython records.""" if self.records is None: raise ValueError("") records = [] for seqname, locations in self.compute_unique_blocks().items(): for i, (start, end) in enumerate(locations): record = self.records[seqname][start:end] record.id = "%s_%03d" % (seqname, i) records.append(record) return records def sequences_with_annotated_blocks(self, colors="auto"): """Return a list of Biopython records representing the sequences with annotations indicating the common blocks. Parameter ``colors`` is either a list of colors or "auto" for the default. """ records = deepcopy(self.records) if colors == "auto": colors = itertools.cycle([cm.Paired(0.21 * i % 1.0) for i in range(30)]) blocks_and_colors = zip(self.common_blocks.items(), colors) for (name, data), color in blocks_and_colors: for (seqname, location) in data["locations"]: annotate_record( records[seqname], location, feature_type="misc_feature", is_block=True, label=name, color=color, ) return records def plot_common_blocks( self, colors="auto", axes=None, figure_width=10, ax_height=2 ): """Plot the common blocks found on vertically stacked axes. The axes on which the plots are drawn are returned at the end. Parameters ---------- colors Either a list of colors to use for blocks or "auto" for the default. axes A list of matplotlib axes on which to plot, or None for new axes. figure_width Width of the final figure in inches. ax_eight Height of each plot. 
""" translator = CommonBlocksRecordTranslator() records = self.sequences_with_annotated_blocks(colors=colors) if axes is None: fig, axes = plt.subplots( len(self.records), 1, facecolor="white", sharex=True, figsize=(figure_width, ax_height * len(self.records)), ) else: fig = axes[0].figure for (ax, (seqname, record)) in zip(axes, records.items()): gr_record = translator.translate_record(record) gr_record.plot( ax, x_lim=(0, max([len(rec) for rec in self.records.values()])), with_ruler=(ax == axes[-1]), ) ax.set_ylim(top=ax.get_ylim()[1]) ax.set_title(seqname, loc="left", fontdict=dict(weight="bold")) # fig.tight_layout() return axes def copy_features_between_common_blocks(self, inplace=False): def extract_subrecord(record, location): start, end, strand = location record = record[start:end] if strand == -1: record = record.reverse_complement() return record def extract_features(record, offset, reverse=False): if reverse: record = record.reverse_complement() new_features = [deepcopy(f) for f in record.features] for f in new_features: f.qualifiers["original_record"] = record.id for f in new_features: f.location += offset return new_features if inplace: records = self.records else: records = deepcopy(self.records) for data in self.common_blocks.values(): locations = data["locations"] subrecords = { rec_id: extract_subrecord(records[rec_id], location) for rec_id, location in data["locations"] } for l1, l2 in itertools.combinations(locations, 2): for ((id1, loc1), (id2, __loc2)) in ((l1, l2), (l2, l1)): start1, __end1, strand1 = loc1 # start2, end2, strand2 = loc2 records[id1].features += extract_features( subrecords[id2], offset=start1, reverse=(strand1 == -1) ) return records
/geneblocks/CommonBlocks/CommonBlocksRecordTranslator.py
from dna_features_viewer import BiopythonTranslator


class CommonBlocksRecordTranslator(BiopythonTranslator):
    """Graphic translator that highlights common-block features.

    Features carrying an ``is_block`` qualifier keep the default coloring,
    get a visible box outline, and a larger font; every other feature is
    drawn white, without outline, and with a smaller font.
    """

    ignored_features_types = ("diff_equal",)
    default_box_color = None

    def compute_feature_color(self, f):
        if not f.qualifiers.get("is_block", False):
            return "white"
        return BiopythonTranslator.compute_feature_color(self, f)

    @staticmethod
    def compute_feature_box_linewidth(f):
        is_block = f.qualifiers.get("is_block", False)
        return 1 if is_block else 0

    @staticmethod
    def compute_feature_fontdict(f):
        is_block = f.qualifiers.get("is_block", False)
        return {"fontsize": 12 if is_block else 9}
/geneblocks/CommonBlocks/__init__.py
"""Public entry point of the CommonBlocks subpackage."""

from .CommonBlocks import CommonBlocks

__all__ = ['CommonBlocks']
/geneblocks/CommonBlocks/commonblocks_tools.py
"""Algorithmic methods for the selection of common blocks in DiffBlocks - select_common_blocks - - segments_difference """ import re import tempfile import subprocess from collections import defaultdict, OrderedDict import numpy as np from ..biotools import reverse_complement, sequence_to_record def format_sequences_as_dicts(sequences): """Standardize different formats into a single one. The ``sequences`` can be either: - A list [('sequence_id', 'ATGC...'), ('sequence_2', ...)] - A list of Biopython records (all with different IDs) - A dict {'sequence_id': "ATGC..."} - A dict {'sequence_id': biopython_record} The output is a tuple (sequences_dict, records_dict), where - sequences_dict is of the form {'sequence_id': 'ATGC...'} - sequences_dict is of the form {'sequence_id': 'ATGC...'} """ if isinstance(sequences, (list, tuple)): if hasattr(sequences[0], "seq"): # SEQUENCES = LIST OF RECORDS records_dict = OrderedDict([(record.id, record) for record in sequences]) sequences_dict = OrderedDict( [(record.id, str(record.seq).upper()) for record in sequences] ) else: # SEQUENCES = LIST OF ATGC STRINGS sequences_dict = OrderedDict(sequences) if isinstance(list(sequences_dict.values())[0], str): records_dict = OrderedDict( [ (name, sequence_to_record(seq, name=name)) for name, seq in sequences_dict.items() ] ) else: records_dict = sequences elif hasattr(list(sequences.values())[0], "seq"): # SEQUENCES = DICT {SEQ_ID: RECORD} records_dict = OrderedDict(sorted(sequences.items())) sequences_dict = OrderedDict( [ (record_id, str(record.seq).upper()) for record_id, record in sequences.items() ] ) else: # SEQUENCES = DICT {SEQ_ID: ATGC} sequences_dict = OrderedDict(sorted(sequences.items())) records_dict = OrderedDict( [ (name, sequence_to_record(seq, name=name)) for name, seq in sequences.items() ] ) return sequences_dict, records_dict def segments_difference(segment, subtracted): """Return the difference between segment (start, end) and subtracted. 
The result is a list containing either zero, one, or two segments of the form (start, end). Examples -------- >>> segment=(10, 100), subtracted=(0, 85) => [(85, 100)] >>> segment=(10, 100), subtracted=(40, 125) => [(10, 40)] >>> segment=(10, 100), subtracted=(30, 55) => [(10, 30), (55, 100)] >>> segment=(10, 100), subtracted=(0, 150) => [] """ seg_start, seg_end = segment sub_start, sub_end = subtracted result = [] if sub_start > seg_start: result.append((seg_start, min(sub_start, seg_end))) if sub_end < seg_end: result.append((max(seg_start, sub_end), seg_end)) return sorted(list(set(result))) def find_homologies_between_sequences( sequences, min_size=0, max_size=None, include_self_homologies=True ): """Return a dict listing the locations of all homologies between sequences. The result is a dict of the form below, where the sequence identifiers are used as keys. >>> { >>> 'seq_1': { >>> (start1, end1): [('seq2_5', _start, _end), ('seq_3', )...] >>> (start2, end2): ... >>> } >>> } Parameters ---------- sequences A dict {'sequence_id': 'ATTGTGCAG...'}. min_size, max_size Minimum and maximum size outside which homologies will be ignored. include_self_homologies If False, self-homologies will be removed from the list. 
""" # BLAST THE SEQUENCES USING NCBI-BLAST temp_fasta_path = tempfile.mktemp(".fa") with open(temp_fasta_path, "w+") as f: f.write( "\n\n".join( ["> %s\n%s" % (name, seq.upper()) for name, seq in sequences.items()] ) ) proc = subprocess.Popen( [ "blastn", "-query", temp_fasta_path, "-subject", temp_fasta_path, "-perc_identity", "100", "-dust", "no", "-evalue", "1000000000000000", "-culling_limit", "10", "-ungapped", "-outfmt", "6 qseqid qstart qend sseqid sstart send", ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) result, __blast_err = proc.communicate() # PARSE THE RESULT FROM BLAST parsing = [line.split("\t") for line in result.decode("utf-8").splitlines()] homologies = {name: defaultdict(lambda *a: []) for name, seq in sequences.items()} # FILTER THE RESULTS (MIN_SIZE, MAX_SIZE, SELF-HOMOLOGIES) for query, qstart, qend, subject, sstart, send in parsing: is_self_homology = (query == subject) and (qstart != sstart) if is_self_homology and (not include_self_homologies): continue qstart, qend = int(qstart) - 1, int(qend) sstart, send = int(sstart) - 1, int(send) if qend - qstart < min_size: continue if (max_size is not None) and (qend - qstart > max_size): continue location = (subject, sstart, send) homologies[query][(qstart, qend)].append(location) return homologies def count_homologies(matches, min_size): """Return a dict {(start, end): number_of_homologies_count}. """ homologies_counts = {} if len(matches) == 1: segment = list(matches.keys())[0] homologies_counts[segment] = 1 matches_list = sorted(matches.keys()) for i, match1 in enumerate(matches_list): for match2 in matches_list[i + 1 :]: segment = start, end = (match2[0], min(match1[1], match2[1])) if end < start: # The segment is empty, match1 and match2 as disjunct. 
break elif (end - start > min_size) and (segment not in homologies_counts): homologies_counts[segment] = len( [ matching for (match_start, match_end) in matches_list for matching in matches[(match_start, match_end)] if match_start <= start <= end <= match_end ] ) return homologies_counts def segment_with_most_homologies(homologies_counts, method="most_coverage_first"): """Select the "best" segment, that should be selected next as a common block.""" def segment_score(segment): if method == "most_coverage_first": factor = homologies_counts[segment] else: factor = 1 start, end = segment return factor * (end - start) return max( [(0, (None, None))] + [(segment_score(segment), segment) for segment in homologies_counts] ) def select_common_blocks( homologies, sequences, min_size=0, method="most_coverage_first" ): """Select a collection of the largest common blocks, iteratively.""" common_blocks = [] homologies_counts = { seqname: count_homologies(matches=homologies[seqname], min_size=min_size) for seqname in sequences } # ITERATIVELY SELECT A COMMON BLOCK AND REMOVE THAT BLOCK FROM THE # homologies IN VARIOUS SEQUENCES, UNTIL THERE IS NO HOMOLOGY while True: # FIND THE HOMOLOGY WITH THE BEST OVERALL SCORE ACROSS ALL SEQS (best_score, (start, end)), seqname = max( [ ( segment_with_most_homologies( homologies_counts[seqname], method=method ), seqname, ) for seqname in sequences ] ) # IF NO HOMOLOGY WAS FOUND AT ALL, STOP if best_score == 0: break # FIND WHERE THE SELECTED SUBSEQUENCE APPEARS IN OTHER SEQUENCES. 
# AT EACH LOCATION, "EXTRUDE" THE SUBSEQUENCE FROM THE CURRENT # LOCATIONS IN homologies_counts best_subsequence = sequences[seqname][start:end] locations = [] for seqname, sequence in sequences.items(): seq_n_intersections = homologies_counts[seqname] # we look for both the subsequence and its reverse complement: for strand in [1, -1]: if strand == 1: matches = re.finditer(best_subsequence, sequence) else: matches = re.finditer( reverse_complement(best_subsequence), sequence ) for match in matches: # add the location to the list for this subsequence... start, end = match.start(), match.end() locations.append((seqname, (start, end, strand))) # ...then subtract the location from the sequence's # homologies list match_as_segment = tuple(sorted([start, end])) for intersection in list(seq_n_intersections.keys()): score = seq_n_intersections.pop(intersection) for diff in segments_difference(intersection, match_as_segment): diff_start, diff_end = diff if diff_end - diff_start > min_size: seq_n_intersections[diff] = score common_blocks.append((best_subsequence, locations)) # REMOVE SELF-HOMOLOGOUS SEQUENCES common_blocks = [ (seq, locations) for (seq, locations) in common_blocks if len(locations) >= 2 ] # CREATE THE FINAL COMMON_BLOCKS_DICT common_blocks_dict = OrderedDict() if len(common_blocks) > 0: number_size = int(np.log10(len(common_blocks))) + 1 for i, (sequence, locations) in enumerate(common_blocks): block_name = "block_%s" % (str(i + 1).zfill(number_size)) common_blocks_dict[block_name] = { "sequence": sequence, "locations": locations, } return common_blocks_dict
/geneblocks/DiffBlocks/DiffBlock.py
"""Defines DiffBlock, a single diff segment between two sequences."""

from ..Location import Location
from ..biotools import sequences_differences


class DiffBlock:
    """Class to represent a segment that differs between sequences.

    Parameters
    ----------
    operation
      One of "insert", "delete", "replace", or "equal" (blocks produced by
      later merging steps may also carry "change", "reverse", "transpose").

    s1_location
      The Location(start, end) of the region in sequence s1.

    s2_location
      The Location(start, end) of the region in sequence s2.
    """

    def __init__(self, operation, s1_location, s2_location):
        self.operation = operation
        self.s1_location = s1_location
        self.s2_location = s2_location

    def to_feature(self, sequence="s2"):
        """Return a Biopython feature of type "diff_<operation>" with a
        short human-readable label, placed on the requested sequence.
        """
        s1_length, s2_length = len(self.s1_location), len(self.s2_location)
        max_length = max([s1_length, s2_length])
        if sequence == "s1":
            # Swap the two locations and recurse with the default ("s2")
            # view so the feature lands on s1's coordinates; the operation
            # name is kept as-is.
            return DiffBlock(
                self.operation, self.s2_location, self.s1_location
            ).to_feature()
        if self.operation == "insert":
            # Short insertions display the inserted nucleotides themselves.
            if max_length <= 7:
                label = "+%s" % self.s2_location.extract_sequence()
            else:
                label = "+ %d nuc." % s2_length
        elif self.operation == "delete":
            if max_length <= 7:
                label = "-%s" % self.s1_location.extract_sequence()
            else:
                label = "- %d nuc." % s1_length
        elif self.operation == "replace":
            if max([s1_length, s2_length]) <= 6:
                label = "%s ➤ %s" % (
                    self.s1_location.extract_sequence(),
                    self.s2_location.extract_sequence(),
                )
            else:
                # For longer replacements, count the point mutations.
                sub_s1 = self.s1_location.extract_sequence()
                sub_s2 = self.s2_location.extract_sequence()
                diffs = sequences_differences(sub_s1, sub_s2)
                label = "%d mutations" % diffs
        elif self.operation == "change":
            if max([s1_length, s2_length]) <= 6:
                label = "%s ➤ %s" % (
                    self.s1_location.extract_sequence(),
                    self.s2_location.extract_sequence(),
                )
            else:
                label = "%sn ➤ %sn change" % (s1_length, s2_length)
        elif self.operation == "reverse":
            label = "was reversed at %d-%d" % (
                self.s1_location.start,
                self.s1_location.end,
            )
        elif self.operation == "transpose":
            label = "was at %d-%d" % (self.s1_location.start, self.s1_location.end,)
        elif self.operation == "equal":
            label = "Equal"
        # NOTE(review): an operation outside the branches above would leave
        # `label` unbound and raise NameError here — verify callers only
        # ever use the listed operations.
        return self.s2_location.to_biopython_feature(
            feature_type="diff_" + self.operation, label=label
        )

    def __str__(self):
        """Represent a diffblock, for instance: "insert 10-30|105-105"."""
        return "%s %s|%s" % (self.operation, self.s1_location, self.s2_location,)

    def __repr__(self):
        return str(self)
/geneblocks/DiffBlocks/DiffBlocks.py
from copy import deepcopy

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np

from ..Location import Location
from ..biotools import sequence_to_record
from ..CommonBlocks import CommonBlocks
from .DiffBlock import DiffBlock
from .DiffRecordTranslator import DiffRecordTranslator
from .diffblocks_tools import (
    compute_levenshtein_blocks,
    get_optimal_common_blocks,
    merge_blocs_by_location,
    merge_successive_blocks,
    compute_sorted_blocks,
)


class DiffBlocks:
    """Class to generate and represent DiffBlocks.

    Usage:

    >>> DiffBlocks.from_sequences(s1, s2)
    """

    def __init__(self, s1, s2, blocks):
        self.s1 = s1
        self.s2 = s2
        self.blocks = blocks

    @staticmethod
    def from_sequences(s1, s2, blast_over=500, max_complexity=1e8):
        """Create DiffBlocks by comparing two sequences.

        Parameters
        ----------
        s1, s2
          Two sequences, either "ATGC..." strings or Biopython records.

        blast_over
          A blast will be triggered to accelerate homology finding if
          len(s1) + len(s2) > blast_over.

        max_complexity
          If len(s1) * len(s2) is over max_complexity, no analysis is done
          and s1 is just labeled as a "change" of s2 (useful internally
          during the recursions of this method).
        """
        # Sequences are compared uppercased, but the original inputs are
        # kept so the final blocks can extract possibly mixed-case
        # nucleotides.
        seq_s1 = str(s1.seq) if hasattr(s1, "seq") else str(s1)
        seq_s2 = str(s2.seq) if hasattr(s2, "seq") else str(s2)

        # Trivial case: the two sequences are equal.
        if seq_s1.upper() == seq_s2.upper():
            return DiffBlocks(s1, s2, [])

        # If the sequences are too big for straight-on Levenshtein, first
        # find the large identical sub-blocks via BLAST, then only diff the
        # in-between regions recursively.
        if (blast_over is not None) and (len(s1) + len(s2)) > blast_over:
            diffblocks = []
            sequences = {"s1": s1, "s2": s2}
            common_blocks = CommonBlocks.from_sequences(
                sequences,
                min_block_size=100,
                include_self_homologies=False,
                block_selection_method="larger_first",
            ).common_blocks
            blocks_in_seqs, remarks = get_optimal_common_blocks(common_blocks)

            # Each common block becomes an "equal" diffblock.
            for b1, b2 in zip(blocks_in_seqs["s1"], blocks_in_seqs["s2"]):
                diffblocks.append(
                    DiffBlock(
                        "equal",
                        s1_location=Location(*b1[:2], sequence=s1),
                        s2_location=Location(*b2[:2], sequence=s2),
                    )
                )

            # Pad each sequence's block list with (0, 0, "START") on the
            # left and (L, L, "END") on the right so the loop below covers
            # the extremities too.
            for seq in blocks_in_seqs:
                length = len(sequences[seq])
                blocks_in_seqs[seq] = (
                    [(0, 0, "START")]
                    + blocks_in_seqs[seq]
                    + [(length, length, "END")]
                )

            # Diff every region located between two successive common
            # blocks.
            for i in range(len(blocks_in_seqs["s2"]) - 1):
                _, end1, _ = blocks_in_seqs["s1"][i]
                next_start1, _, _ = blocks_in_seqs["s1"][i + 1]
                _, end2, _ = blocks_in_seqs["s2"][i]
                next_start2, _, _ = blocks_in_seqs["s2"][i + 1]
                if next_start2 < end2:
                    # Successive blocks overlap on s2: the in-between region
                    # of s1 is a plain deletion (empty interval on s2).
                    sub_blocks = [
                        DiffBlock(
                            "delete",
                            s1_location=Location(
                                end1, next_start1, sequence=s1
                            ),
                            s2_location=Location(
                                next_start2, next_start2, sequence=s2
                            ),
                        )
                    ]
                else:
                    # Recurse (without blasting) on the in-between
                    # subsequences, then shift the sub-blocks' coordinates
                    # back into the full sequences' frames.
                    sub_diff = DiffBlocks.from_sequences(
                        s1[end1:next_start1],
                        s2[end2:next_start2],
                        blast_over=None,
                        max_complexity=max_complexity,
                    )
                    sub_blocks = sub_diff.blocks
                    for block in sub_blocks:
                        block.s1_location.start += end1
                        block.s1_location.end += end1
                        block.s1_location.sequence = s1
                        block.s2_location.start += end2
                        block.s2_location.end += end2
                        block.s2_location.sequence = s2
                # Fixed: both branches now contribute their blocks; the
                # previous code mixed a bare list with a DiffBlocks
                # instance and only collected blocks from the recursive
                # branch.
                diffblocks += sub_blocks

            # Drop blocks that are empty on both sequences.
            diffblocks = [
                b
                for b in diffblocks
                if len(b.s1_location) or len(b.s2_location)
            ]
            sorted_blocks = compute_sorted_blocks(diffblocks + remarks)
            return DiffBlocks(s1, s2, sorted_blocks)

        # Small-sequence case: direct Levenshtein diff on the uppercased
        # strings.
        s1_std = str(s1.seq if hasattr(s1, "seq") else s1).upper()
        s2_std = str(s2.seq if hasattr(s2, "seq") else s2).upper()
        levenshtein_blocks = compute_levenshtein_blocks(
            s1_std, s2_std, max_complexity=max_complexity
        )
        blocks = [
            DiffBlock(
                operation,
                Location(s1s, s1e, sequence=s1),
                Location(s2s, s2e, sequence=s2),
            )
            for operation, (s1s, s1e), (s2s, s2e) in levenshtein_blocks
        ]
        return DiffBlocks(s1, s2, blocks)

    def merged(
        self,
        blocks_per_span=(3, 600),
        change_gap=100,
        replace_gap=10,
        reference="s2",
    ):
        """Return a new DiffBlocks where nearby blocks are merged.

        Parameters
        ----------
        blocks_per_span
          Pair (max_blocks, span): any ``max_blocks`` successive blocks
          spanning less than ``span`` nucleotides are merged. None
          disables this pass.

        change_gap, replace_gap
          Maximum gaps for merging successive "change"/"replace" blocks.
          ``change_gap=None`` disables this pass.

        reference
          "s1" or "s2": which sequence's coordinates define gaps/spans.
        """
        blocks = [
            b
            for b in self.blocks
            if b.operation not in ["reverse", "transpose"]
        ]
        remarks = [
            b for b in self.blocks if b.operation in ["reverse", "transpose"]
        ]
        if blocks_per_span is not None:
            max_blocks, span = blocks_per_span
            blocks = merge_blocs_by_location(
                blocks=blocks,
                max_blocks=max_blocks,
                max_span=span,
                reference=reference,
            )
        if change_gap is not None:
            blocks = merge_successive_blocks(
                blocks=blocks,
                change_gap=change_gap,
                replace_gap=replace_gap,
                # Fixed: the reference was hard-coded to "s2" here, ignoring
                # the method's `reference` parameter.
                reference=reference,
            )
        blocks = compute_sorted_blocks(blocks + remarks)
        return DiffBlocks(s1=self.s1, s2=self.s2, blocks=blocks)

    def sort_blocks(self):
        """Sort self.blocks in place by location."""
        self.blocks = compute_sorted_blocks(self.blocks)

    def diffs_as_features(self, sequence="s2"):
        """Return one Biopython feature per block, on the given sequence."""
        return [block.to_feature(sequence=sequence) for block in self.blocks]

    def plot(
        self,
        translator_class="default",
        separate_axes=True,
        sequence="s2",
        **plot_kw
    ):
        """Plot the diff blocks, over the annotated record when available.

        Returns (ax_record, ax_diffs) when ``separate_axes`` is true, else
        a single ax with the record's features and the diffs overlaid.
        """
        if translator_class == "default":
            translator_class = DiffRecordTranslator
        translator = translator_class()
        record = deepcopy(self.s2 if sequence == "s2" else self.s1)
        if not hasattr(record, "features"):
            record = sequence_to_record(record)
        diff_features = self.diffs_as_features(sequence=sequence)
        if separate_axes:
            gr_record = translator.translate_record(record)
            record.features = diff_features
            gr_diffrecord = DiffRecordTranslator().translate_record(record)
            width = plot_kw.get("figure_width", 8)
            if "axes" in plot_kw:
                ax1, ax2 = plot_kw["axes"]
                fig = ax1.figure
            else:
                fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(width, 6))
            plot_kw["annotate_inline"] = plot_kw.get("annotate_inline", True)
            _, stats1 = gr_record.plot(ax=ax1, **plot_kw)
            _, stats2 = gr_diffrecord.plot(
                ax=ax2, with_ruler=False, **plot_kw
            )
            # Estimate how many annotation levels each plot needs, so a
            # better-proportioned figure can be drawn below.
            max_features_1 = gr_record.feature_level_height * max(
                [0] + [v for v in stats1[0].values()]
            )
            max_level_1 = max(
                [max_features_1]
                + [v["annotation_y"] for v in stats1[1].values()]
            )
            max_level_2 = (
                max([1] + [v["annotation_y"] for v in stats2[1].values()]) + 2
            )
            max_level_1 = int(np.round(max_level_1))
            max_level_2 = int(np.round(max_level_2))
            n_levels = max_level_1 + max_level_2
            if max_level_1 and max_level_2:
                # Redraw on a fresh figure whose two axes' heights are
                # proportional to the annotation levels they contain.
                plt.close(fig)
                easing = 3
                gs = gridspec.GridSpec(n_levels + 2 * easing, 1)
                fig = plt.figure(
                    figsize=(width, 1 + 0.5 * n_levels), facecolor="w"
                )
                ax1 = fig.add_subplot(gs[: max_level_1 + easing])
                ax2 = fig.add_subplot(gs[max_level_1 + easing :])
                _, stats1 = gr_record.plot(ax=ax1, **plot_kw)
                _, stats2 = gr_diffrecord.plot(
                    ax=ax2, with_ruler=False, **plot_kw
                )
            ax2.set_ylim(bottom=-2)
            ax2.invert_yaxis()
            # Shade each diff's span on the record plot, in the diff's
            # color.
            for f in gr_diffrecord.features:
                ax1.fill_between(
                    [f.start, f.end],
                    y1=max_features_1 + 1,
                    y2=-1,
                    facecolor=f.color,
                    alpha=0.07,
                    zorder=1000,
                )
            return (ax1, ax2)
        else:
            record.features += diff_features
            gr_record = translator.translate_record(record)
            ax, _ = gr_record.plot(**plot_kw)
            return ax

    @staticmethod
    def reconstruct_sequences_from_blocks(blocks):
        """Rebuild the (s1, s2) strings from a list of DiffBlocks."""
        s1, s2 = "", ""
        blocks = sorted(blocks, key=lambda b: b.s2_location.to_tuple())
        for block in blocks:
            if block.operation in ("equal", "replace", "change", "delete"):
                s1 = s1 + block.s1_location.extract_sequence()
            if block.operation in ("equal", "replace", "change", "insert"):
                s2 = s2 + block.s2_location.extract_sequence()
        return s1, s2

    def __str__(self):
        return ", ".join([str(b) for b in self.blocks])
/geneblocks/DiffBlocks/DiffRecordTranslator.py
from dna_features_viewer import BiopythonTranslator


class DiffRecordTranslator(BiopythonTranslator):
    """Graphic translator coloring diff_* features by their operation.

    Non-diff features are drawn white with no box outline and a smaller
    font; "diff_equal" features are not drawn at all.
    """

    ignored_features_types = ("diff_equal",)
    default_box_color = None

    @staticmethod
    def compute_feature_color(f):
        colors = {
            "diff_delete": "#E76F51",  # RED
            "diff_insert": "#2A9D8F",  # GREEN
            "diff_replace": "#E9C46A",  # YELLOW
            "diff_change": "#F4A261",  # ORANGE
            "diff_reverse": "white",
            "diff_transpose": "white",
        }
        return colors.get(f.type, "white")

    @staticmethod
    def compute_feature_box_linewidth(f):
        is_diff = f.type.startswith("diff_")
        return 1 if is_diff else 0

    @staticmethod
    def compute_feature_fontdict(f):
        is_diff = f.type.startswith("diff_")
        return {"fontsize": 12 if is_diff else 9}
/geneblocks/DiffBlocks/__init__.py
"""Public entry point of the DiffBlocks subpackage."""

from .DiffBlocks import DiffBlocks, DiffBlock
from .DiffRecordTranslator import DiffRecordTranslator

__all__ = ['DiffBlocks', 'DiffBlock', 'DiffRecordTranslator']
/geneblocks/DiffBlocks/diffblocks_tools.py
from copy import deepcopy import networkx as nx import Levenshtein from ..Location import Location from .DiffBlock import DiffBlock def compute_levenshtein_blocks(seq1, seq2, max_complexity=1e8): """Compute the Levenshtein blocks of insertion, deletion, replacement. """ # TODO: better method for dealing with long sequences? l1, l2 = len(seq1), len(seq2) if l1 * l2 > max_complexity: return [("change", (0, l1), (0, l2))] def block_format(op, s1, e1, s2, e2): if op == "delete": return (op, (s1, e1 + 1), (s2, e2)) if op == "insert": return (op, (s1, e1), (s2, e2 + 1)) else: return (op, (s1, e1 + 1), (s2, e2 + 1)) edits = Levenshtein.editops(seq1, seq2) if len(edits) == 0: return [] bop, s1, s2 = edits[0] e1, e2 = s1, s2 blocks = [] for (op, _e1, _e2) in edits[1:]: continuity = any( [ all([op == "delete", _e1 == e1 + 1, e2 == _e2]), all([op == "insert", _e1 == e1, _e2 == e2 + 1]), all([op == "replace", _e1 == e1 + 1, _e2 == e2 + 1]), ] ) if op == bop and continuity: e1, e2 = _e1, _e2 else: blocks.append(block_format(bop, s1, e1, s2, e2)) bop, s1, s2 = op, _e1, _e2 e1, e2 = s1, s2 blocks.append(block_format(bop, s1, e1, s2, e2)) return blocks def merge_subblocks(subblocks): s1_location = Location( min([b.s1_location.start for b in subblocks]), max([b.s1_location.end for b in subblocks]), sequence=subblocks[0].s1_location.sequence, ) s2_location = Location( min([b.s2_location.start for b in subblocks]), max([b.s2_location.end for b in subblocks]), sequence=subblocks[0].s2_location.sequence, ) if len(s1_location) == len(s2_location): return DiffBlock("replace", s1_location, s2_location) else: return DiffBlock("change", s1_location, s2_location) def merge_blocs_by_location(blocks, max_blocks, max_span, reference="s2"): while 1: for i in range(len(blocks) - max_blocks): subblocks = blocks[i : i + max_blocks] b1, b2 = subblocks[0], subblocks[-1] if reference == "s2": span = b2.s2_location.end - b1.s2_location.start else: span = b2.s1_location.end - b1.s1_location.start if 
span < max_span: new_block = merge_subblocks(subblocks) blocks = blocks[:i] + [new_block] + blocks[i + max_blocks :] break else: break return blocks def merge_successive_blocks(blocks, change_gap=10, replace_gap=5, reference="s2"): while 1: for i in range(len(blocks) - 1): b1, b2 = blocks[i], blocks[i + 1] operations = (b1.operation, b2.operation) if reference == "s2": gap = b2.s2_location.start - b1.s2_location.end else: gap = b2.s1_location.start - b1.s1_location.end if "equal" in operations: continue if "change" in operations and (gap < change_gap): new_block = merge_subblocks([b1, b2]) blocks = blocks[:i] + [new_block] + blocks[i + 2 :] break if operations == ("replace", "replace") and gap < replace_gap: new_block = merge_subblocks([b1, b2]) blocks = blocks[:i] + [new_block] + blocks[i + 2 :] break else: break return blocks def compute_sorted_blocks(blocks, reference="s2"): def sort_key(block): if reference == "s2": return block.s2_location.to_tuple() else: return block.s1_location.to_tuple() return sorted(blocks, key=sort_key) def get_optimal_common_blocks(common_blocks): common_blocks = deepcopy(common_blocks) remarks = [] # Make so that there is never an antisense block in s1 and a + block in s2. # If it is so, flip the block in s2. It will become antisense and be later # removed for block_name, data in common_blocks.items(): locations = data["locations"] s1_strands = [strand for (s, (_, _, strand)) in locations if s == "s1"] if 1 not in s1_strands: for i, location in enumerate(locations): seq, (start, end, strand) = location if seq == "s2": locations[i] = (seq, (start, end, -strand)) # Remove every antisense blocks now. For the ones in s2, log this # with a remark. 
for block_name, data in common_blocks.items(): locations = data["locations"] for i, location in enumerate(locations): (seq, (start, end, strand)) = location if seq == "s2" and (strand == -1): locations.remove(location) _, (start1, end1, strand1) = locations[0] remarks.append( DiffBlock( "reverse", s1_location=Location(start1, end1, strand1), s2_location=Location(start, end), ) ) # We start the structure that will be returned in the end blocks_in_seqs = { seq: sorted( [ (start, end, bname) for bname, data in common_blocks.items() for (s, (start, end, strand)) in data["locations"] if s == seq ] ) for seq in ("s1", "s2") } # Identify blocks appearing only in one of the two sequences blocks_in_s1 = set(b[-1] for b in blocks_in_seqs["s1"]) blocks_in_s2 = set(b[-1] for b in blocks_in_seqs["s2"]) uniblocks = (blocks_in_s1.union(blocks_in_s2)).difference( blocks_in_s1.intersection(blocks_in_s2) ) # Remove blocks appearing only in one of the two sequences # as they are useless for sequences comparison # this should be very rare but you never know. for block_list in blocks_in_seqs.values(): for b in block_list: if b[-1] in uniblocks: block_list.remove(b) for b1, b2 in zip(block_list, block_list[1:]): start1, end1, __name1 = b1 start2, end2, __name2 = b2 if end2 <= end1: block_list.remove(b2) # If a block appears several time in a sequence (self-homology) # give unique names to each occurence: block_1, block_1*, etc. blocks_in_seqs_dicts = dict(s1={}, s2={}) for seq, blocks_list in list(blocks_in_seqs.items()): seen_blocks = set() for i, (start, end, block_name) in enumerate(blocks_list): while block_name in seen_blocks: block_name = block_name + "*" blocks_list[i] = start, end, block_name blocks_in_seqs_dicts[seq][block_name] = dict(rank=i, location=(start, end)) seen_blocks.add(block_name) # Find and retain the largest sequence of blocks which is in the right # order in both sequences. We will remove every other block. 
# We do that by looking for the longest path in a graph if len(blocks_in_seqs_dicts["s2"]) < 2: retained_blocks = list(blocks_in_seqs_dicts["s2"]) else: s1_dict = blocks_in_seqs_dicts["s1"] graph = nx.DiGraph( [ (b1, b2) for b1, data1 in blocks_in_seqs_dicts["s2"].items() for b2, data2 in blocks_in_seqs_dicts["s2"].items() if (b2 in s1_dict) and (b1 in s1_dict) and (s1_dict[b2]["rank"] > s1_dict[b1]["rank"]) and (data2["rank"] > data1["rank"]) ] ) retained_blocks = nx.dag_longest_path(graph) # remove any "misplaced" block that is not in the retained list. # log a remark for the ones in s2. for seq in ("s1", "s2"): blocks_list = blocks_in_seqs[seq] for block in list(blocks_list): # copy cause we will remove elements start, end, block_name = block if block_name not in retained_blocks: blocks_list.remove(block) if seq == "s2": new_block_name = block_name.strip("*") s1_blocks = blocks_in_seqs_dicts["s1"] s1_loc = s1_blocks[new_block_name]["location"] start1, end1 = s1_loc if ( len(remarks) and (start == remarks[-1].s2_location.end) and (start1 == remarks[-1].s1_location.end) ): remarks[-1].s1_location.end = end1 remarks[-1].s2_location.end = end else: remarks.append( DiffBlock( "transpose", s1_location=Location(start1, end1), s2_location=Location(start, end), ) ) # Reduce blocks when there is overlap blocks_to_reduce = {} for seq in ("s1", "s2"): blocks_list = blocks_in_seqs[seq] for b1, b2 in zip(blocks_list, blocks_list[1:]): start1, end1, block_name1 = b1 start2, end2, __block_name2 = b2 diff = end1 - start2 if diff > 0: if block_name1 not in blocks_to_reduce: blocks_to_reduce[block_name1] = 0 blocks_to_reduce[block_name1] = max(blocks_to_reduce[block_name1], diff) for seq in ("s1", "s2"): blocks_list = blocks_in_seqs[seq] for i, (start, end, block_name) in enumerate(blocks_list): if block_name in blocks_to_reduce: diff = blocks_to_reduce[block_name] blocks_list[i] = (start, end - diff, block_name) return blocks_in_seqs, remarks
/geneblocks/Location.py
from .biotools import reverse_complement from Bio.SeqFeature import SeqFeature, FeatureLocation class Location: def __init__( self, start, end, strand=None, sequence=None, sequence_id=None ): self.start = start self.end = end self.strand = strand self.sequence = sequence self.sequence_id = sequence_id def extract_sequence(self, sequence=None): """Return the subsequence read at the given location. If sequence is None, ``self.sequence`` is used. """ if sequence is None: sequence = self.sequence if hasattr(sequence, "seq"): sequence = str(sequence.seq) result = sequence[self.start : self.end] if self.strand == -1: return reverse_complement(result) else: return result def __repr__(self): """Represent""" result = "%d-%d" % (self.start, self.end) if self.strand is not None: result += {1: "(+)", -1: "(-)", 0: ""}[self.strand] if self.sequence_id is not None: result = self.sequence_id + "|" + result return result def __len__(self): """Size of the location""" return abs(self.end - self.start) def to_tuple(self): return self.start, self.end, self.strand def to_biopython_location(self): """Return a Biopython FeatureLocation equivalent to the location.""" start, end, strand = [ None if e is None else int(e) for e in [self.start, self.end, self.strand] ] return FeatureLocation(start, end, strand) def to_biopython_feature(self, feature_type="misc_feature", **qualifiers): """Return a Biopython SeqFeature with same location and custom qualifiers.""" return SeqFeature( self.to_biopython_location(), type=feature_type, qualifiers=qualifiers, )
/geneblocks/__init__.py
""" geneblocks/__init__.py """ # __all__ = [] from .CommonBlocks import CommonBlocks from .DiffBlocks import DiffBlocks, DiffRecordTranslator from .biotools import load_record, random_dna_sequence, reverse_complement from .utils import sequences_are_circularly_equal from .version import __version__ __all__ = [ "CommonBlocks", "DiffBlocks", "DiffRecordTranslator", "load_record", "random_dna_sequence", "reverse_complement", "sequences_are_circularly_equal" "__version__", ]
/geneblocks/biotools.py
import tempfile import subprocess import numpy as np try: from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from Bio.SeqFeature import SeqFeature, FeatureLocation from Bio import SeqIO BIOPYTHON_AVAILABLE = True except ImportError: BIOPYTHON_AVAILABLE = False try: # Biopython <1.78 from Bio.Alphabet import DNAAlphabet has_dna_alphabet = True except ImportError: # Biopython >=1.78 has_dna_alphabet = False complements_dict = {"A": "T", "T": "A", "C": "G", "G": "C"} def random_dna_sequence(length, probas=None, seed=None): """Return a random DNA sequence ("ATGGCGT...") with the specified length. Parameters ---------- length Length of the DNA sequence. proba Frequencies for the different nucleotides, for instance ``probas={"A":0.2, "T":0.3, "G":0.3, "C":0.2}``. If not specified, all nucleotides are equiprobable (p=0.25). seed The seed to feed to the random number generator. When a seed is provided the random results depend deterministically on the seed, thus enabling reproducibility """ if seed is not None: np.random.seed(seed) if probas is None: sequence = np.random.choice(list("ATCG"), length) else: bases, probas = zip(*probas.items()) sequence = np.random.choice(bases, length, p=probas) return "".join(sequence) def load_record(filename, linear=True, name="id", upperize=True): formt = "genbank" if filename.endswith(("gb", "gbk")) else "fasta" record = SeqIO.read(filename, formt) if upperize: record.seq = record.seq.upper() record.linear = linear if name != "id": record.id = name record.name = record.id.replace(" ", "_")[:20] return record def complement(sequence): return "".join(complements_dict[c] for c in sequence) def reverse_complement(sequence): return complement(sequence)[::-1] def sequence_to_record(sequence, record_id=None, name="unnamed", features=()): if not BIOPYTHON_AVAILABLE: raise ImportError("Creating records requires Biopython installed.") if has_dna_alphabet: # Biopython <1.78 sequence = Seq(sequence, alphabet=DNAAlphabet()) else: sequence = 
Seq(sequence) seqrecord = SeqRecord(sequence, name=name, id=record_id, features=list(features),) seqrecord.annotations["molecule_type"] = "DNA" return seqrecord def annotate_record( seqrecord, location="full", feature_type="feature", margin=0, **qualifiers ): """Add a feature to a Biopython SeqRecord. Parameters ---------- seqrecord The biopython seqrecord to be annotated. location Either (start, end) or (start, end, strand). (strand defaults to +1) feature_type The type associated with the feature margin Number of extra bases added on each side of the given location. qualifiers Dictionnary that will be the Biopython feature's `qualifiers` attribute. """ if not BIOPYTHON_AVAILABLE: raise ImportError("Creating records requires Biopython installed.") if location == "full": location = (margin, len(seqrecord) - margin) strand = location[2] if len(location) == 3 else 1 seqrecord.features.append( SeqFeature( FeatureLocation(location[0], location[1], strand), qualifiers=qualifiers, type=feature_type, ) ) def sequences_differences_array(seq1, seq2): """Return an array [0, 0, 1, 0, ...] with 1s for sequence differences. seq1, seq2 should both be ATGC strings. """ if len(seq1) != len(seq2): raise ValueError( "Only use on same-size sequences (%d, %d)" % (len(seq1), len(seq2)) ) arr1 = np.fromstring(seq1, dtype="uint8") arr2 = np.fromstring(seq2, dtype="uint8") return arr1 != arr2 def sequences_differences(seq1, seq2): """Return the number of nucleotides that differ in the two sequences. seq1, seq2 should be strings of DNA sequences e.g. "ATGCTGTGC" """ return sequences_differences_array(seq1, seq2).sum()
/geneblocks/sequence_modification_utils.py
"""These methods are only useful to build examples and tests for Geneblocks.""" from .biotools import reverse_complement def change(seq, start, end, change): """Return the sequence with ``seq[start:end]`` replaced by ``change``""" return seq[:start] + change + seq[end:] def insert(seq, pos, inserted): """Return the sequence with ``inserted`` inserted, starting at index 'pos' """ return seq[:pos] + inserted + seq[pos:] def delete(seq, pos, deletions): """Return the sequence with a number of deletions from position pos.""" return seq[:pos] + seq[pos + deletions :] def reverse(seq, start, end): """Return the sequence with segment seq[start:end] reverse-complemented.""" return seq[:start] + reverse_complement(seq[start:end]) + seq[end:] def move(seq, start, end, diff): """Move a subsequence by "diff" nucleotides the left or the right.""" sub = seq[start:end] if diff > 0: return seq[:start] + seq[end : end + diff] + sub + seq[end + diff :] else: return ( seq[: start + diff] + sub + seq[start + diff : start] + seq[end:] ) def swap(seq, pos1, pos2): """Return a new sequence with segments at position pos1 and pos2 swapped. pos1, pos2 are both of the form (start1, end1), (start2, end2) """ (start1, end1), (start2, end2) = sorted([pos1, pos2]) return ( seq[:start1] + seq[start2:end2] + seq[end1:start2] + seq[start1:end1] + seq[end2:] ) def copy(seq, start, end, new_start): """Return the sequence with segment [start, end] also copied elsewhere, starting in new_start.""" return insert(seq, new_start, seq[start:end])
/geneblocks/utils.py
from .CommonBlocks import CommonBlocks from .biotools import sequence_to_record def _turn_sequence_into_record_if_necessary(sequence, record_id="id"): if hasattr(sequence, "seq"): return sequence else: return sequence_to_record(sequence, record_id=record_id) def sequences_are_circularly_equal(sequences): """Return whether all the sequences represent the same circular sequence. This means that the sequences are differently rotated versions of a same circular sequence, and for any pair s1, s2 in the sequences, there is an index i such that s1 = s2[i:] + s2[:i]. The ``sequences`` parameter should be a list of "ATGC" strings or SeqRecords. """ sequences = [ _turn_sequence_into_record_if_necessary(seq, record_id="REC_%d" % i) for i, seq in enumerate(sequences) ] if len(sequences) < 2: raise ValueError("Provide at least 2 sequences") elif len(sequences) > 2: first_equal = sequences_are_circularly_equal(sequences[:2]) return first_equal and sequences_are_circularly_equal(sequences[1:]) s1, s2 = sequences[:2] if s1.id == s2.id: s1.id += "_b" if len(s1) != len(s2): return False blocks = CommonBlocks.from_sequences(sequences=[s1, s2], min_block_size=2) if len(blocks.common_blocks) > 2: return False potential_pivot_indices = [ index for data in blocks.common_blocks.values() for (origin, (start, end, _)) in data["locations"] for index in [start, end] ] s1, s2 = str(s1.seq), str(s2.seq) for index in potential_pivot_indices: new_s1 = s1[index:] + s1[:index] if new_s1 == s2: return True return False
/setup.py
import ez_setup ez_setup.use_setuptools() from setuptools import setup, find_packages exec(open("geneblocks/version.py").read()) # loads __version__ setup( name="geneblocks", version=__version__, author="Zulko", description="Library to compare DNA sequences (diff, common blocks, etc.)", long_description=open("pypi-readme.rst").read(), license="MIT", url="https://github.com/Edinburgh-Genome-Foundry/geneblocks", keywords="DNA sequence blocks diff synthetic-biology bioinformatics", packages=find_packages(exclude="docs"), install_requires=[ "numpy", "Biopython", "dna_features_viewer", "networkx", "python-Levenshtein", ], )
/tests/test_base_diff_cases.py
from geneblocks.DiffBlocks import DiffBlock, DiffBlocks from geneblocks.biotools import random_dna_sequence def diff_string(seq1, seq2, contract_under=0): return str(DiffBlocks.from_sequences(seq1, seq2)) flanks_dict = { 'short': 10 * "A", 'long': random_dna_sequence(10000) } def flanked(seq, flanks='short'): flank = flanks_dict[flanks] return flank + seq + flank def test_1(): assert diff_string( flanked("T", "short"), flanked("C", "short") ) == 'replace 10-11|10-11' def test_2(): assert diff_string( flanked("T", "long"), flanked("C", "long") ) == ', '.join([ 'equal 0-10000|0-10000', 'replace 10000-10001|10000-10001', 'equal 10001-20001|10001-20001' ]) def test_3(): assert diff_string( "AATAATAAT", "AAAAAAAAA" ) == 'replace 2-3|2-3, replace 5-6|5-6, replace 8-9|8-9' def test_4(): assert diff_string( flanked("AATAATAAT", 'long'), flanked("AAAAAAAAA", 'long') ) == ', '.join([ 'equal 0-10002|0-10002', 'replace 10002-10003|10002-10003', 'replace 10005-10006|10005-10006', 'replace 10008-10009|10008-10009', 'equal 10009-20009|10009-20009' ]) def test_5(): assert diff_string( "AAAAATTTTGGAAA", "AAATTTTGGAAAAA" ) == 'delete 3-5|3-3, insert 11-11|9-11' def test_6(): assert diff_string( flanked("AAAAATTTTGGAAA", 'long'), flanked("AAATTTTGGAAAAA", 'long') ) == ', '.join([ 'equal 0-10003|0-10003', 'delete 10003-10005|10003-10003', 'insert 10011-10011|10009-10011', 'equal 10011-20014|10011-20014' ])
/tests/test_complex_sequences.py
from geneblocks import DiffBlocks, CommonBlocks, random_dna_sequence import geneblocks.sequence_modification_utils as smu import matplotlib.pyplot as plt def test_complex_sequences(): seq1 = random_dna_sequence(50000, seed=123) seq1 = smu.copy(seq1, 25000, 30000, 50000) seq2 = seq1 seq2 = smu.insert(seq2, 39000, random_dna_sequence(100)) seq2 = smu.insert(seq2, 38000, random_dna_sequence(100)) seq2 = smu.reverse(seq2, 30000, 35000) seq2 = smu.swap(seq2, (30000, 35000), (45000, 480000)) seq2 = smu.delete(seq2, 20000, 2000) seq2 = smu.insert(seq2, 10000, random_dna_sequence(2000)) seq2 = smu.insert(seq2, 0, 1000 * "A") diff_blocks = DiffBlocks.from_sequences(seq1, seq2).merged() b = diff_blocks.blocks assert len(b) == 15 assert b[0].operation == "insert" start, end, _ = b[0].s2_location.to_tuple() assert end - start == 1000 assert b[1].operation == "equal" assert b[2].operation == "insert" start, end, _ = b[2].s2_location.to_tuple() assert end - start == 2000 assert sorted([b[6].operation, b[7].operation]) == ["change", "transpose"] assert sorted([b[-1].operation, b[-2].operation]) == ["change", "reverse"] s1, s2 = diff_blocks.reconstruct_sequences_from_blocks(diff_blocks.blocks) assert s1 == seq1 assert s2 == seq2
/tests/test_scenarios.py
import os import matplotlib import networkx as nx from geneblocks import CommonBlocks, DiffBlocks, load_record from geneblocks.biotools import reverse_complement, random_dna_sequence from geneblocks.DiffBlocks import DiffBlock from geneblocks.Location import Location matplotlib.use("Agg") def test_CommonBlocks_basics(tmpdir): C1, A, B, C2, C3, D, E, F = [random_dna_sequence(100 * L) for L in range(1, 9)] sequences = { "a": C1 + A + C2, "b": B + D + C2 + C3, "c": E + C1 + C2, "d": C2 + C1 + F + C3, "e": C3 + reverse_complement(C2 + C1), } common_blocks = CommonBlocks.from_sequences(sequences) assert len(common_blocks.common_blocks) == 3 axes = common_blocks.plot_common_blocks() fig_path = os.path.join(str(tmpdir), "basic_example.png") axes[0].figure.savefig(fig_path, bbox_inches="tight") # GET ALL COMMON BLOCKS AS BIOPYTHON RECORDS _ = common_blocks.common_blocks_records() _ = common_blocks.unique_blocks_records() # WRITE ALL COMMON BLOCKS INTO A CSV SPREADSHEET csv_path = os.path.join(str(tmpdir), "basic_example.csv") common_blocks.common_blocks_to_csv(target_file=csv_path) def test_DiffBlocks_basics(tmpdir): seq_1 = load_record(os.path.join("tests", "sequences", "sequence1.gb")) seq_2 = load_record(os.path.join("tests", "sequences", "sequence2.gb")) diff_blocks = DiffBlocks.from_sequences(seq_1, seq_2).merged() # next line is just to cover separate_axes=false diff_blocks.plot(figure_width=8, separate_axes=False) ax1, __ax2 = diff_blocks.plot(figure_width=8) fig_path = os.path.join(str(tmpdir), "diff_blocks.png") ax1.figure.savefig(fig_path, bbox_inches="tight") assert list(map(str, diff_blocks.blocks)) == [ "insert 0-0|0-120", "equal 0-1000|120-1120", "replace 1000-1004|1120-1124", "equal 1004-1503|1124-1623", "insert 1503-1503|1623-1723", "equal 1503-2304|1723-2524", "delete 2304-2404|2524-2524", "equal 2404-3404|2524-3524", ] def test_features_transfer(): seq_folder = os.path.join("tests", "sequences", "features_transfer") insert = 
load_record(os.path.join(seq_folder, "insert.gb"), name="insert") plasmid = load_record( os.path.join(seq_folder, "plasmid_to_annotate.gb"), name="plasmid" ) blocks = CommonBlocks.from_sequences([insert, plasmid]) records = blocks.copy_features_between_common_blocks(inplace=False) assert len(records["plasmid"].features) == 6 assert len(plasmid.features) == 2 blocks.copy_features_between_common_blocks(inplace=True) assert len(plasmid.features) == 6 def test_networkx_dag_longest_path(): # Github issue #7 # networkx >=2.6 has a different correct output. This test catches future changes. test_graph = nx.DiGraph([("block_1", "block_3"), ("block_1", "block_2")]) assert nx.dag_longest_path(test_graph) == ["block_1", "block_3"] def test_good_management_of_homologies(): """This checks for a former obscure bug where a sequence with 2 homologies in seq2 corresponding to a single sequence in s1 used to cause an index error due to the "*" added by the algorithm to the end of homologies.""" # See also Github issue #7 b1 = random_dna_sequence(4000, seed=123) b2 = random_dna_sequence(4000, seed=234) b3 = random_dna_sequence(4000, seed=345) seq1 = b1 + "A" + "T" + b2 + b3 seq2 = "T" + b1 + "T" + b3 + b2 + b1 + b1 blocks = DiffBlocks.from_sequences(seq1, seq2).merged() assert len(blocks.blocks) == 9
/tests/test_utils.py
from geneblocks.utils import sequences_are_circularly_equal from Bio import SeqIO import os this_directory = os.path.dirname(os.path.realpath(__file__)) def test_sequences_are_circularly_equal(): block_1 = "ATGTGCACACGCACCGTGTGTGCACACACGTGTGCACACACGTGCACACGGTGT" block_2 = "ACACACATATACGCGTGCGTGCAAAACACATTTTACACGGCACGTGCA" block_3 = "ACCCACACTTTGTGTCGCGCACACGTGTG" # Three rotated sequences seq_1 = block_1 + block_2 + block_3 seq_2 = block_2 + block_3 + block_1 seq_3 = block_3 + block_1 + block_2 # Sequences not equivalent to the previous seq_4 = block_2 + block_3 + block_1 + "A" seq_5 = block_1 + block_2 + block_2 + block_3 assert sequences_are_circularly_equal([seq_1, seq_2]) assert sequences_are_circularly_equal([seq_1, seq_2, seq_3]) assert not sequences_are_circularly_equal([seq_1, seq_2, seq_3, seq_4]) assert not sequences_are_circularly_equal([seq_1, seq_5]) def test_long_equivalents(): """Test with two 12kb sequences with only a 10bp shift""" path = os.path.join(this_directory, "sequences", "long_equivalents.fa") seq_a, seq_b = SeqIO.parse(path, 'fasta') assert sequences_are_circularly_equal([seq_a, seq_b])
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
doctor-budoka/shared-expenses-site
refs/heads/master
{"/expenses_app/auth/auth.py": ["/expenses_app/models.py", "/expenses_app/auth/forms.py"], "/expenses_app/commands.py": ["/expenses_app/models.py"], "/expenses_app/group/group.py": ["/expenses_app/models.py", "/expenses_app/group/forms.py"]}
└── ├── config.py └── expenses_app ├── __init__.py ├── auth │ ├── auth.py │ └── forms.py ├── commands.py ├── group │ ├── forms.py │ └── group.py └── models.py
/config.py
from os import environ from pathlib import Path from dotenv import load_dotenv BASE_DIRECTORY = Path(__file__).parent load_dotenv(BASE_DIRECTORY / ".env") class Config: FLASK_ENV = "development" TESTING = True DEBUG = True SECRET_KEY = environ.get("SECRET_KEY") STATIC_FOLDER = "static" TEMPLATES_FOLDER = "templates" # Database values SQLALCHEMY_DATABASE_URI = environ.get("SQLALCHEMY_DATABASE_URI") SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True
/expenses_app/__init__.py
from flask import Flask from expenses_app.models import db from flask_login import LoginManager login_manager = LoginManager() def create_app(): app = Flask(__name__, template_folder="templates") app.config.from_object("config.Config") db.init_app(app) login_manager.init_app(app) with app.app_context(): from expenses_app.auth import auth app.register_blueprint(auth.auth_bp) from expenses_app.group import group app.register_blueprint(group.grp_bp) from expenses_app import commands db.create_all() return app
/expenses_app/auth/auth.py
from flask import url_for, flash, render_template, make_response from flask import Blueprint from flask_login import login_user, login_required, logout_user from werkzeug.utils import redirect from expenses_app import db, login_manager from expenses_app.auth.forms import LogInForm, Register from expenses_app.models import AuthorisedEmail, User auth_bp = Blueprint( 'auth_bp', __name__, template_folder='templates', static_folder='static' ) @auth_bp.route("/login", methods=["GET", "POST"]) def login(): form = LogInForm() if form.validate_on_submit(): email = form.email.data password = form.password.data email = AuthorisedEmail.query.filter(AuthorisedEmail.email == email).first() if email and email.user and email.user.check_password(password): user = email.user login_user(user) return redirect(url_for("grp_bp.index")) else: # TODO: Limit number of retries flash("Invalid email or password!") return render_template("login.html", form=form) @auth_bp.route("/register", methods=["GET", "POST"]) def register(): form = Register() if form.validate_on_submit(): email = form.email.data username = form.username.data username_exists = User.query.filter_by(username=username).first() auth_email = AuthorisedEmail.query.filter_by(email=email).first() if auth_email and auth_email.is_registered: flash("You are already registered! Try logging in instead!") elif auth_email and username_exists: flash("That username already exists! Try another") elif auth_email: password = form.password.data user = User.create_user(auth_email, password, username) db.session.commit() if user: login_user(user) return redirect(url_for("grp_bp.index")) else: # TODO: Handle these errors more nicely return make_response("Something went wrong with registration!", 500) else: flash("Email is not an authorised email! 
This is a private service.") return render_template("register.html", form=form) @auth_bp.route("/logout") @login_required def logout(): logout_user() return redirect(url_for("auth_bp.login")) @login_manager.user_loader def load_user(user_id): if user_id is not None: return User.query.get(user_id) return None @login_manager.unauthorized_handler def unauthorized(): flash('You must be logged in to view that page.') return redirect(url_for('auth_bp.login'))
/expenses_app/auth/forms.py
from flask_wtf import FlaskForm from wtforms import StringField, SubmitField, PasswordField from wtforms.validators import InputRequired, Email, EqualTo class LogInForm(FlaskForm): email = StringField("Email", [ InputRequired(message="You must provide an email address to continue"), Email(message="Email entered is not a valid email address")]) password = PasswordField("Password", [InputRequired(message="You must provide a password to continue")]) submit = SubmitField("Submit") class Register(FlaskForm): email = StringField("Email", [ InputRequired(message="You must provide an email address to continue"), Email(message="Email entered is not a valid email address")]) username = StringField( "Username", [InputRequired(message="You must profice a username to continue")] ) password = PasswordField("Password", [InputRequired(message="You must provide a password to continue")]) confirm = PasswordField("Confirm", [ InputRequired(message="You must provide a password to continue"), EqualTo("password", message="Password and confirmation must be the same!") ]) submit = SubmitField("Submit")
/expenses_app/commands.py
import click from flask import current_app as app from expenses_app.models import db, AuthorisedEmail, User @app.cli.command("reset-db") def reset_db(): """Used to reset the db for the app""" click.echo("Resetting db...") db.drop_all() db.create_all() click.echo("Done") @app.cli.command("create-auth-emails") @click.argument("emails", nargs=-1) def create_authorised_emails(emails): """Adds emails to the Authorised emails db""" click.echo("Emails added to authorised_email:") if emails: for email in emails: click.echo(f"\t'{email}'") new_email = AuthorisedEmail() new_email.email = email db.session.add(new_email) db.session.commit() @app.cli.command("create-user") @click.argument("username") @click.argument("password") @click.argument("email") def create_user(username, password, email): """Adds a user to the db""" click.echo(f"Creating user with email='{email}', username='{username}'") auth_email = AuthorisedEmail.query.filter_by(email=email).first() if auth_email: User.create_user(auth_email, password, username) db.session.commit() else: raise ValueError(f"{email} is not an authorised email address!")
/expenses_app/group/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, FloatField, BooleanField
from wtforms.validators import InputRequired


class CreateGroup(FlaskForm):
    """Form for creating a new expense group."""
    name = StringField("Name", [InputRequired(message="You must provide a name for the group!")])
    create = SubmitField("Create")


class AddUserToGroup(FlaskForm):
    """Form for adding an existing user (by username) to a group."""
    username = StringField("Username", [InputRequired(message="You must provide a name for the user!")])
    add = SubmitField("Add")


class RemoveUserFromGroup(FlaskForm):
    """Form for removing a member from a group."""
    username = SelectField(
        "Username", coerce=int,
        validators=[InputRequired(message="You must provide a user to remove!")])
    remove = SubmitField("Remove")

    @classmethod
    def from_group(cls, group, current_user):
        """Build the form with every group member except *current_user* as a choice."""
        form = cls()
        removable = []
        for member in group.members:
            if member == current_user:
                continue
            removable.append((member.id, member.username))
        form.username.choices = removable
        return form


class AddAccountToGroup(FlaskForm):
    """Form for adding an account (optionally a member's avatar) to a group."""
    name = StringField("Name", [InputRequired(message="You must provide a name for the account!")])
    user = SelectField("User", coerce=int, default=-1)
    starting_balance = FloatField("Starting Balance")
    has_balance = BooleanField("Has Balance?", default=False)
    add = SubmitField("Add")

    @classmethod
    def from_group(cls, group):
        """Build the form; only members without a live avatar account are offered.

        -1 represents "no user" (a plain, non-avatar account).
        """
        form = cls()
        already_avatared = set()
        for account in group.accounts:
            if account.is_avatar and account.status == "live":
                already_avatared.add(account.avatar_for)
        form.user.choices = [
            (member.id, member.username)
            for member in group.members
            if member not in already_avatared
        ]
        form.user.choices.append((-1, "None"))
        return form


class RemoveAccountFromGroup(FlaskForm):
    """Form for removing an account from a group."""
    name = SelectField(
        "Name", coerce=int,
        validators=[InputRequired(message="You must provide an account to be removed!")])
    remove = SubmitField("Remove")

    @classmethod
    def from_group(cls, group):
        """Build the form; only live, non-avatar accounts may be removed."""
        form = cls()
        candidates = []
        for account in group.accounts:
            if account.status == "live" and not account.is_avatar:
                candidates.append((account.id, account.name))
        form.name.choices = candidates
        return form
/expenses_app/group/group.py
from flask import render_template, redirect, url_for, flash, Blueprint
from flask_login import login_required, current_user

from expenses_app.group.forms import CreateGroup, AddUserToGroup, RemoveUserFromGroup, AddAccountToGroup, RemoveAccountFromGroup
from expenses_app.models import db, User, Group, Account

grp_bp = Blueprint(
    'grp_bp', __name__,
    template_folder='templates',
    static_folder='static'
)


@grp_bp.route("/", methods=["GET", "POST"])
@login_required
def index():
    """Landing page; creates a new group on a valid POST."""
    form = CreateGroup()
    if form.validate_on_submit():
        new_group_name = form.name.data
        exists = Group.query.filter(Group.name == new_group_name).first()
        if not exists:
            current_user.create_group(new_group_name)
            db.session.commit()
        else:
            flash(f"{new_group_name} has already been taken! Try another name.")
    return render_template("index.html", form=form)


@grp_bp.route("/groups/<group_name>/summary", methods=["GET", "POST"])
@login_required
def group_summary(group_name):
    """Group summary page; members only."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        return render_template("group_summary.html", group=group)
    return redirect(url_for("grp_bp.index"))


@grp_bp.route("/groups/<group_name>/access", methods=["GET", "POST"])
@login_required
def group_access(group_name):
    """Page for adding/removing group members; members only."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        add_form = AddUserToGroup()
        remove_form = RemoveUserFromGroup.from_group(group, current_user)
        return render_template("group_access.html", group=group,
                               add_form=add_form, remove_form=remove_form)
    return render_template("index.html", group=group)


@grp_bp.route("/groups/<group_name>/remove_user", methods=["POST"])
@login_required
def remove_user_from_group(group_name):
    """Remove a member from the group.

    Guard added: the original called from_group() on a possibly-None group
    (AttributeError for unknown names) and let non-members POST removals.
    """
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        remove_form = RemoveUserFromGroup.from_group(group, current_user)
        if remove_form.validate_on_submit():
            user_id = remove_form.username.data
            old_user = User.query.get(user_id)
            group.remove_user(old_user)
            db.session.commit()
    return redirect(url_for("grp_bp.group_access", group_name=group_name))


@grp_bp.route("/groups/<group_name>/add_user", methods=["POST"])
@login_required
def add_user_to_group(group_name):
    """Add a user to the group by username; members only (guard added)."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        add_form = AddUserToGroup()
        if add_form.validate_on_submit():
            user_name = add_form.username.data
            new_user = User.query.filter_by(username=user_name).first()
            if new_user:
                group.add_user(new_user)
                db.session.commit()
            else:
                flash(f"{user_name} is not a valid username!")
    return redirect(url_for("grp_bp.group_access", group_name=group_name))


@grp_bp.route("/groups/<group_name>/accounts")
@login_required
def group_accounts(group_name):
    """Page for adding/removing group accounts; members only."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        add_form = AddAccountToGroup.from_group(group)
        remove_form = RemoveAccountFromGroup.from_group(group)
        return render_template("group_accounts.html", group=group,
                               add_form=add_form, remove_form=remove_form)
    return redirect(url_for("grp_bp.index"))


@grp_bp.route("/groups/<group_name>/add_account", methods=["POST"])
@login_required
def add_account_to_group(group_name):
    """Create a new account in the group, or revive a soft-deleted one."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        add_form = AddAccountToGroup.from_group(group)
        if add_form.validate_on_submit():
            name = add_form.name.data
            name_exists = Account.query.filter(Account.name == name,
                                               Account.group_id == group.id).first()
            if name_exists and name_exists.status == "live":
                flash("The account name already exists in this group!")
            elif name_exists:
                # Bug fix: a soft-deleted account with this name is revived.
                # The original re-assigned status = "removed" (a no-op), so the
                # name stayed permanently unusable.
                name_exists.status = "live"
                db.session.commit()
            else:
                # -1 in the user select means "no user" (plain account).
                user_id = add_form.user.data
                user = User.query.get(user_id) if user_id > -1 else None
                has_balance = add_form.has_balance.data
                balance = add_form.starting_balance.data if has_balance else None
                Account.create_account(group, name, user, balance)
                db.session.commit()
    return redirect(url_for("grp_bp.group_accounts", group_name=group_name))


@grp_bp.route("/groups/<group_name>/remove_account", methods=["POST"])
@login_required
def remove_account_from_group(group_name):
    """Soft-delete an account (status -> "removed"); members only."""
    group = group_from_group_name(group_name)
    if group and group.has_user(current_user):
        remove_form = RemoveAccountFromGroup.from_group(group)
        if remove_form.validate_on_submit():
            account_id = remove_form.name.data
            old_account = Account.query.get(account_id)
            # Guard added: only delete accounts that belong to this group --
            # the form is user input and the id could point anywhere.
            if old_account and old_account.group_id == group.id:
                old_account.status = "removed"
                db.session.commit()
    return redirect(url_for("grp_bp.group_accounts", group_name=group_name))


def group_from_group_name(group_name):
    """Look up a Group by its (unique) name; returns None when absent."""
    return Group.query.filter(Group.name == group_name).first()
/expenses_app/models.py
import datetime as dt

from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin

db = SQLAlchemy()


class AuthorisedEmail(db.Model):
    """Whitelisted email address; each may register at most one User."""
    email_id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True)
    user = db.relationship("User", uselist=False, back_populates="email")
    is_registered = db.Column(db.Boolean, nullable=False, default=False)

    def register_user(self, user):
        """Bind *user* to this email.  Returns False if it was already used."""
        if self.is_registered:
            return False
        self.user = user
        self.is_registered = True
        return True

    def __repr__(self):
        return f"<AuthEmail {self.email}>"


# Association table for the many-to-many User <-> Group membership.
group_membership_table = db.Table(
    "group_membership",
    db.metadata,
    db.Column("user_id", db.Integer, db.ForeignKey("user.id")),
    db.Column("group_id", db.Integer, db.ForeignKey("group.id"))
)


class User(UserMixin, db.Model):
    """Application user; owns groups and belongs to groups."""
    id = db.Column(db.Integer, primary_key=True)
    email_id = db.Column(
        db.Integer,
        db.ForeignKey("authorised_email.email_id"),
        unique=True, index=True, nullable=False
    )
    email = db.relationship("AuthorisedEmail", back_populates="user")
    username = db.Column(db.String(50), unique=True, nullable=False)
    password_hash = db.Column(db.String(128), nullable=False)
    time_joined = db.Column(db.DateTime, default=dt.datetime.utcnow)
    owned_groups = db.relationship("Group", back_populates="owner")
    groups = db.relationship("Group", secondary=group_membership_table, back_populates="members")

    def set_password(self, password):
        # Only a salted hash is ever stored, never the plaintext.
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        return check_password_hash(self.password_hash, password)

    @classmethod
    def create_user(cls, email, password, username):
        """Create a User for an AuthorisedEmail.

        Returns the new User, or None when *email* was already registered.
        """
        new_user = cls()
        new_user.set_password(password)
        new_user.email = email
        new_user.username = username
        if email.register_user(new_user):
            return new_user
        return None

    def create_group(self, name):
        """Create a Group owned by this user, with the user as first member.

        Bug fix: the original appended to BOTH new_group.members and
        self.groups.  These are two sides of the same back_populates
        relationship, which SQLAlchemy keeps in sync automatically, so the
        double append inserted a duplicate row in group_membership.
        """
        new_group = Group()
        new_group.name = name
        new_group.owner = self
        new_group.members.append(self)

    def __repr__(self):
        return f"<User {self.username}>"


class Group(db.Model):
    """Expense group: one owner, many members, many accounts."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True, index=True, nullable=False)
    owner_id = db.Column(db.Integer, db.ForeignKey("user.id"), index=True, nullable=False)
    owner = db.relationship("User", back_populates="owned_groups")
    members = db.relationship("User", secondary=group_membership_table, back_populates="groups")
    accounts = db.relationship("Account", back_populates="group")

    def has_user(self, user):
        return user in self.members

    def add_user(self, new_user):
        self.members.append(new_user)

    def remove_user(self, old_user):
        self.members.remove(old_user)

    def remove_account(self, old_account):
        self.accounts.remove(old_account)

    def __repr__(self):
        return f"<Group {self.id}, {self.name}>"


class Account(db.Model):
    """An account inside a group; may be the "avatar" of one member."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), index=True, nullable=False)
    group_id = db.Column(db.Integer, db.ForeignKey("group.id"), index=True, nullable=False)
    group = db.relationship("Group", uselist=False, back_populates="accounts")
    is_avatar = db.Column(db.Boolean, nullable=False, default=False)
    avatar_for_user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=True)
    avatar_for = db.relationship("User", uselist=False)
    has_balance = db.Column(db.Boolean, default=False, nullable=False)
    starting_balance_cents = db.Column(db.Integer, nullable=True)
    # "removed" is a soft delete; queries elsewhere filter on status == "live".
    status = db.Column(db.Enum("live", "removed", name="account_status"),
                       nullable=False, default="live")

    # Account names must be unique within a group.  Bug fix: the original
    # called db.UniqueConstraint(...) as a bare class-body expression, which
    # SQLAlchemy silently discards; the constraint must go in __table_args__.
    __table_args__ = (db.UniqueConstraint("name", "group_id", name="uix_group_name"),)

    @property
    def starting_balance(self):
        """Balance in currency units (2 dp), or None when no balance is tracked.

        Bug fix: the original guard read `self.starting_balance`, recursing
        into this property forever; it must inspect the cents column.
        """
        if self.starting_balance_cents is None:
            return None
        return round(self.starting_balance_cents / 100, 2)

    @starting_balance.setter
    def starting_balance(self, new_balance):
        if new_balance is not None:
            self.has_balance = True
            # Integer cents avoid float rounding drift in the database.
            self.starting_balance_cents = round(new_balance * 100)
        else:
            self.has_balance = False
            self.starting_balance_cents = None

    @classmethod
    def create_account(cls, group, name, user, balance):
        """Build an Account (an avatar when *user* is given); not committed here."""
        new_account = cls()
        new_account.group = group
        new_account.name = name
        new_account.is_avatar = user is not None
        new_account.avatar_for = user
        new_account.starting_balance = balance
        return new_account


class Transactions(db.Model):
    """A payment made by one account on behalf of another, within a group."""
    id = db.Column(db.Integer, primary_key=True)
    group_id = db.Column(db.Integer, db.ForeignKey("group.id"), nullable=False)
    paid_by_id = db.Column(db.Integer, db.ForeignKey("account.id"), nullable=False)
    on_behalf_of_id = db.Column(db.Integer, db.ForeignKey("account.id"), nullable=False)
    description = db.Column(db.Text(200), nullable=True)
    store = db.Column(db.Text(100), nullable=True)
    # NOTE(review): stored as Float while Account uses integer cents -- consider
    # migrating to cents for consistency; left unchanged here (schema change).
    amount = db.Column(db.Float, nullable=False)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
blackout314/myrecon.py
refs/heads/master
{"/myrecon.py": ["/modules/resolve.py", "/modules/subdomains.py", "/modules/app.py"]}
└── ├── modules │ ├── app.py │ ├── functions.py │ ├── openredirect.py │ ├── resolve.py │ ├── screenshot.py │ └── subdomains.py └── myrecon.py
/modules/app.py
# I don't believe in license.
# You can do whatever you want with this program.

import os
import sys
import time

from colored import fg, bg, attr


class App(object):
    """Shared state for a recon run (domains, hosts, ips, urls).

    Each set* method stores the data and persists it to a file inside the
    output directory.
    """

    def __init__(self):
        # Bug fix: the original declared these as class-level attributes;
        # class-level mutable lists are shared by every App instance.
        self.mods = []
        self.d_output = ''
        self.f_domains = ''
        self.f_hosts = ''
        self.f_tmphosts = ''
        self.f_dead = ''
        self.f_ips = ''
        self.f_urls = ''
        self.domains = []
        self.n_domains = 0
        self.hosts = []
        self.n_hosts = 0
        self.ips = []
        self.n_ips = 0
        self.dead = []
        self.n_dead = 0
        self.urls = []
        self.n_urls = 0

    def wait(self):
        """Display a spinner while background mods keep running."""
        t_chars = ['|', '/', '-', '\\', '|', '/', '-']
        l = len(t_chars)
        sys.stdout.write("\n\n")
        for n in range(100000):
            time.sleep(0.5)
            sys.stdout.write(' %s\r' % t_chars[n % l])

    def setMods(self, t_mods):
        self.mods = t_mods

    def setOutputDirectory(self, cd_output):
        self.d_output = cd_output
        sys.stdout.write('[+] output directory is: %s\n' % self.d_output)
        self.initFilePath()

    def initFilePath(self):
        """Derive every output file path from the output directory."""
        self.f_domains = self.d_output + '/domains'
        self.f_hosts = self.d_output + '/hosts'
        self.f_tmphosts = self.d_output + '/tmp_hosts'
        self.f_dead = self.d_output + '/dead'
        self.f_ips = self.d_output + '/ips'
        self.f_urls = self.d_output + '/urls'

    def setDomains(self, t_domains):
        """Store the target domains and save them to the domains file."""
        self.domains = t_domains
        self.n_domains = len(t_domains)
        sys.stdout.write('%s[+] %d domains found.%s\n' % (fg('green'), self.n_domains, attr(0)))
        if self.n_domains:
            # "with" guarantees the handle is closed even if the write fails
            # (the original left files open on error).
            with open(self.f_domains, 'w') as fp:
                fp.write("\n".join(self.domains))
            sys.stdout.write('[+] saved in %s\n' % self.f_domains)

    def setHosts(self, t_hosts):
        """Store the discovered hosts and save them to the hosts file."""
        self.hosts = t_hosts
        self.n_hosts = len(t_hosts)
        sys.stdout.write('%s[+] %d hosts found.%s\n' % (fg('green'), self.n_hosts, attr(0)))
        if self.n_hosts:
            with open(self.f_hosts, 'w') as fp:
                fp.write("\n".join(self.hosts))
            sys.stdout.write('[+] saved in %s\n' % self.f_hosts)

    def setIps(self, t_ips, full_output):
        """Store resolved IPs; also dump the raw resolver output to tmp_hosts."""
        self.ips = t_ips
        self.n_ips = len(t_ips)
        sys.stdout.write('%s[+] %d ips found.%s\n' % (fg('green'), self.n_ips, attr(0)))
        if self.n_ips:
            with open(self.f_ips, 'w') as fp:
                fp.write("\n".join(t_ips))
            sys.stdout.write('[+] saved in %s\n' % self.f_ips)
        with open(self.f_tmphosts, 'w') as fp:
            fp.write(full_output)

    def setDeadHosts(self, t_dead):
        """Drop hosts that did not resolve from the live host list."""
        sys.stdout.write('[+] %d dead hosts found, cleaning...\n' % len(t_dead))
        for host in t_dead:
            self.hosts.remove(host)

    def createUrls(self):
        """Build http/https URLs for every live host and IP, save to urls file."""
        sys.stdout.write('[+] creating urls...\n')
        for host in self.hosts:
            self.urls.append('http://' + host)
            self.urls.append('https://' + host)
        for ip in self.ips:
            self.urls.append('http://' + ip)
            self.urls.append('https://' + ip)
        self.n_urls = len(self.urls)
        sys.stdout.write('%s[+] %d urls created.%s\n' % (fg('green'), self.n_urls, attr(0)))
        if self.urls:
            with open(self.f_urls, 'w') as fp:
                fp.write("\n".join(self.urls))
            sys.stdout.write('[+] saved in %s\n' % self.f_urls)
/modules/functions.py
# I don't believe in license.
# You can do whatever you want with this program.

import os
import sys
import argparse

import tldextract


def parseargs(app, t_available_mods):
    """Parse the command line and configure *app* (output dir, domains, mods).

    Exits via parser.error() when no valid domain or mod can be determined.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--domain", help="domain, single, multiples or files", action="append")
    parser.add_argument("-o", "--output", help="output dir")
    parser.add_argument("-m", "--mod", help="mods to run, can be: resolve, screenshots, quickhits, crlf, openredirect. Default: resolve,screenshots,quickhits")
    # Parse exactly once (the original called parse_args() twice).
    args = parser.parse_args()

    if args.output:
        if not os.path.isdir(args.output):
            try:
                os.makedirs(args.output)
            except Exception as e:
                # Bug fix: the original formatted this message with fg()/attr(),
                # which are never imported in this module and raised NameError.
                sys.stdout.write("[-] error occurred: %s\n" % e)
                exit()
        app.setOutputDirectory(args.output)
    else:
        app.setOutputDirectory(os.getcwd())

    if args.domain:
        t_domains = []
        for d in args.domain:
            if os.path.isfile(d):
                # A -d value may be a file containing one domain per line.
                sys.stdout.write('[+] loading file: %s\n' % d)
                with open(d, 'r') as fp:
                    for l in fp:
                        l = l.strip()
                        if isDomain(l) and l not in t_domains:
                            t_domains.append(l)
            else:
                if isDomain(d) and d not in t_domains:
                    t_domains.append(d)
        if not len(t_domains):
            parser.error('domain missing')
    else:
        parser.error('domain missing')

    if args.mod:
        t_mods = []
        for m in args.mod.split(','):
            if m not in t_available_mods and m != 'all':
                parser.error("mod '%s' doesn't exist" % m)
            elif m == 'all':
                t_mods = t_available_mods
                break
            else:
                t_mods.append(m)
        if not len(t_mods):
            parser.error('mod missing')
    else:
        # No -m given: run every available mod.
        t_mods = t_available_mods

    app.setDomains(t_domains)
    app.setMods(t_mods)


def isDomain(name):
    """Return True if *name* is a bare registered domain (no subdomain part).

    (Parameter renamed from `str`, which shadowed the builtin.)
    """
    t_parse = tldextract.extract(name)
    return t_parse.subdomain == '' and t_parse.domain != '' and t_parse.suffix != ''
/modules/openredirect.py
# I don't believe in license.
# You can do whatever you want with this program.

import os
import sys
import subprocess

from colored import fg, bg, attr


def run(app):
    """Launch the external open-redirect scanner against the hosts file.

    The command is detached with a trailing "&"; this module neither waits
    for it nor checks its exit status.
    """
    sys.stdout.write('[+] running mod: openredirect\n')
    command = 'open-redirect.py -o ' + app.f_hosts + ' 2>&1 >/dev/null &'
    os.system(command)
/modules/resolve.py
# I don't believe in license.
# You can do whatever you want with this program.

import re
import sys
import subprocess
from colored import fg, bg, attr
from functools import partial
from multiprocessing.dummy import Pool


class Resolve(object):
    """Resolves host names with the system `host` command.

    After run():
      - ips:         unique IPv4 addresses found
      - dead_host:   hosts whose lookup failed
      - full_output: concatenated raw `host` output (dumped to tmp_hosts later)
    """

    def __init__(self):
        # Bug fix: the original used class-level mutable attributes, which
        # are shared by every Resolve instance.
        self.ips = []
        self.n_ips = 0
        self.dead_host = []
        self.n_dead = 0
        self.full_output = ''

    def run(self, t_hosts):
        """Resolve every host in *t_hosts* with a 10-worker thread pool."""
        sys.stdout.write('[+] running mod: resolve...\n')
        t_multiproc = {
            'n_current': 0,
            'n_total': len(t_hosts)
        }
        pool = Pool(10)
        pool.map(partial(self.resolve, t_multiproc), t_hosts)
        pool.close()
        pool.join()
        self.n_ips = len(self.ips)
        self.n_dead = len(self.dead_host)

    def resolve(self, t_multiproc, host):
        """Resolve one host; record its IPs, or mark it dead on failure."""
        sys.stdout.write('progress: %d/%d\r' % (t_multiproc['n_current'], t_multiproc['n_total']))
        t_multiproc['n_current'] = t_multiproc['n_current'] + 1

        try:
            cmd = 'host ' + host
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode('utf-8')
        except Exception:
            # `host` exited non-zero (NXDOMAIN etc.): nothing to record.
            return

        self.full_output = self.full_output + output + "\n"

        # Raw string so the \. escapes reach the regex engine as intended.
        matches = re.findall(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}', output)

        if matches:
            for ip in matches:
                if ip not in self.ips:
                    self.ips.append(ip)
        else:
            # Bug fix: the original referenced `sef.dead_host`, raising a
            # NameError every time a host resolved with no IPv4 in its output.
            if host not in self.dead_host:
                self.dead_host.append(host)
/modules/screenshot.py
# I don't believe in license.
# You can do whatever you want with this program.

import os
import sys
import subprocess

from colored import fg, bg, attr


def run(app):
    """Launch EyeWitness against the generated urls file.

    Output goes to <output_dir>/eye; the process is detached with "&", so
    this function returns immediately without checking the result.
    """
    sys.stdout.write('[+] running mod: screenshots\n')
    command = 'EyeWitness --headless -f "' + app.f_urls + '" --user-agent "Mozilla/5.0 (X11; Linux i586; rv:63.0) Gecko/20100101 Firefox/63.0" --no-prompt --threads 10 -d ' + app.d_output + '/eye 2>&1 >/dev/null &'
    os.system(command)
/modules/subdomains.py
# I don't believe in license.
# You can do whatever you want with this program.

import re
import sys
import subprocess
from colored import fg, bg, attr
from functools import partial
from multiprocessing.dummy import Pool


class Subdomains(object):
    """Enumerates subdomains of the target domains with `findomain`."""

    def __init__(self):
        # Bug fix: the original used class-level mutable attributes, which
        # are shared by every Subdomains instance.
        self.hosts = []
        self.n_hosts = 0

    def run(self, t_domains):
        """Enumerate every domain in *t_domains* with a 3-worker thread pool."""
        sys.stdout.write('[+] looking for subdomains...\n')
        t_multiproc = {
            'n_current': 0,
            'n_total': len(t_domains)
        }
        pool = Pool(3)
        pool.map(partial(self.find, t_multiproc), t_domains)
        pool.close()
        pool.join()
        self.n_hosts = len(self.hosts)

    def find(self, t_multiproc, domain):
        """Run findomain for one domain and collect matching subdomains."""
        sys.stdout.write('progress: %d/%d\r' % (t_multiproc['n_current'], t_multiproc['n_total']))
        t_multiproc['n_current'] = t_multiproc['n_current'] + 1

        try:
            cmd = 'findomain -t ' + domain
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode('utf-8')
        except Exception as e:
            sys.stdout.write("%s[-] error occurred: %s%s\n" % (fg('red'), e, attr(0)))
            return

        # Bug fix: the domain is user input interpolated into a regex; its
        # dots previously matched ANY character, so e.g. "fooXexample.com"
        # matched "example.com".  re.escape() makes the match literal.
        matches = re.findall(r'([a-zA-Z0-9\._-]+\.' + re.escape(domain) + ')', output)

        if matches:
            for sub in matches:
                sub = sub.strip('._- ')
                if sub not in self.hosts:
                    self.hosts.append(sub)
/myrecon.py
#!/usr/bin/python3.5 # I don't believe in license. # You can do whatever you want with this program. t_available_mods = ['resolve', 'screenshot', 'quickhits', 'crlf', 'openredirect'] # # init app # from modules import functions as func from modules.app import App app = App() func.parseargs( app, t_available_mods ) # # ### # # # MOD: subdomains # from modules.subdomains import Subdomains mod = Subdomains() mod.run( app.domains ) if not mod.n_hosts: exit() app.setHosts( mod.hosts ) # # ### # # # MOD: resolve # if 'resolve' in app.mods: from modules.resolve import Resolve mod = Resolve() mod.run( app.hosts ) app.setIps( mod.ips, mod.full_output ) if mod.n_dead: app.setDeadHosts( mod.dead_host ) # # ### # # # create urls used by other tools # app.createUrls() # # ### # # # optional modules # if 'screenshot' in app.mods: from modules import screenshot screenshot.run( app ) if 'quickhits' in app.mods: from modules import quickhits quickhits.run( app ) if 'crlf' in app.mods: from modules import crlf crlf.run( app ) if 'openredirect' in app.mods: from modules import openredirect openredirect.run( app ) # # ### # # app.wait() # next # cors # google dorks # new subdomains # endpoints # gf mykeys # gf noisy # gf takeovers # final report
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
peteramazonian/simulation_project
refs/heads/master
{"/main_multi_run.py": ["/logger_multi_run.py", "/number_generator.py", "/service_station.py", "/movement.py", "/system_arrival.py", "/time_generator.py"], "/main_single_run.py": ["/number_generator.py", "/logger_single_run.py", "/service_station.py", "/movement.py", "/system_arrival.py", "/time_generator.py"], "/movement.py": ["/time_management.py", "/system_arrival.py"], "/service_station.py": ["/time_management.py"], "/system_arrival.py": ["/time_management.py"]}
└── ├── logger_multi_run.py ├── logger_single_run.py ├── main_multi_run.py ├── main_single_run.py ├── movement.py ├── number_generator.py ├── service_station.py ├── system_arrival.py ├── time_generator.py └── time_management.py
/logger_multi_run.py
import xlsxwriter
from datetime import datetime


class LoggerMR:
    """Excel logger for multi-replication runs.

    Writes one row per replication plus a final averages table into
    LOG_MR/LOG-<replications>R--<timestamp>.xlsx.
    """

    def __init__(self, ss_names, replications):
        self.total_replications = replications
        self.ss_names = ss_names  # Setting list of service ServiceStations
        time = datetime.now().strftime("%d-%m-%Y--%H-%M-%S")
        # Creating Excel report file in LOG_MR folder in the project directory
        self.wb = xlsxwriter.Workbook('LOG_MR/LOG-' + str(self.total_replications) + 'R--' + time + '.xlsx')
        self.ws = self.wb.add_worksheet("all logs")  # Creating a sheet inside the Excel file
        # Creating default format object
        self.default_format = self.wb.add_format(dict(font_name='Century Gothic', align='center', valign='vcenter'))
        # Defining a dictionary so it can be edited easily before creating a new format object
        self.header_format_dict = dict(font_name='Century Gothic', align='center', valign='vcenter', bold=True,
                                       font_color='navy', text_wrap=True, bg_color='silver', border=1)
        # Setting default format and height=14 for first 50 columns in all rows
        self.ws.set_column(0, 50, 14, self.default_format)
        # Freezing first 2 rows and first column
        self.ws.freeze_panes(2, 1)
        # Writing header for first column
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        self.ws.merge_range(0, 0, 1, 0, "Replication", format_tmp)  # Writing first row in merged cell
        # Writing header for column 2
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        format_tmp.set_bg_color('#CCFF99')  # Changing background color of the format object
        self.ws.write(0, 1, "System", format_tmp)  # Writing first row
        system_parameters = ['Average Time in System']
        for col_num, cell_name in enumerate(system_parameters):  # Writing second row
            self.ws.write(1, col_num + 1, cell_name, format_tmp)
        # Writing header for columns after 5
        # One section for each ServiceStation. It will cover all ServiceStations automatically.
        color_list = ['#FF5050', '#FFFF99']  # Defining a color list to choose in a loop for each
        # ServiceStation so it can be separated easily
        for num, ss in enumerate(self.ss_names):
            format_tmp = self.wb.add_format(self.header_format_dict)
            # Setting background color of the format object used for this
            # ServiceStation's header, from the color list
            format_tmp.set_bg_color(color_list[int(num % len(color_list))])
            # Parameters names list. you need to edit this if you want to change what parameters are printed in log file
            # Also you should change ServiceStation's "final_calculations" function
            # Order of parameters in ss_parameters and result dict in ServiceStations should be the same
            ss_parameters = ['Total Wait Time', 'Average Queue Delay', 'Average Queue Length', 'Maximum Queue Length',
                             'Servers Efficiency', 'Queue Busy Percentage']
            i = num * len(ss_parameters) + 2  # Choose starting column
            self.ws.merge_range(0, i, 0, i + len(ss_parameters) - 1, ss, format_tmp)  # Writing first row in
            # merged cell
            for index, cell_name in enumerate(ss_parameters):  # Writing second row
                self.ws.write(1, index + i, cell_name, format_tmp)
        self.row = 2  # Setting the starting row to write logs. 3rd row is the row after header.
        self.replication_number = 1

    def replication_logger(self, s_list, SystemArrival):
        # It will write the system evaluation parameters for each replication in a new row
        column = 0
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        self.ws.write(self.row, column, self.replication_number, format_tmp)
        self.replication_number += 1
        column += 1
        # System-level results first, then one section per ServiceStation;
        # column order must match the header written in __init__.
        for key, value in SystemArrival.result.items():
            self.ws.write(self.row, column, value)
            column += 1
        for ss in s_list:
            for key, value in ss.result.items():
                self.ws.write(self.row, column, value)
                column += 1
        self.row += 1

    def result_logger(self, ss_names, result):
        # It will write the system evaluation parameters in a table at the end of the log file
        self.row += 3  # The table starts 3 rows after where log table ends
        column = 4  # The table starts from 5th column
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        format_tmp.set_bg_color('#29A8FF')  # Changing it's background color to blue
        # Writing the header:
        self.ws.write(self.row, column, 'Scope', format_tmp)
        self.ws.merge_range(self.row, column + 1, self.row, column + 2, "Parameter Average", format_tmp)
        self.ws.write(self.row, column + 3, 'Value', format_tmp)
        self.row += 1
        color_list = ['#FF5050', '#FFFF99']  # Used to separate parts with two colors in loop
        for num, ss in enumerate(ss_names):  # Writing ServiceStations evaluation parameters
            format_tmp = self.wb.add_format(self.header_format_dict)
            format_tmp.set_bg_color(color_list[int(num % len(color_list))])
            # merge_range with a single row would raise in xlsxwriter, hence the branch.
            if len(result[num]) > 1:
                self.ws.merge_range(self.row, column, self.row + len(result[num]) - 1, column, ss, format_tmp)
            else:
                self.ws.write(self.row, column, ss, format_tmp)
            for key, value in result[num].items():  # Writing parameters name and value
                self.ws.merge_range(self.row, column + 1, self.row, column + 2, key, format_tmp)
                # Accumulated totals divided by replication count -> averages.
                self.ws.write(self.row, column + 3, value / self.total_replications, format_tmp)
                self.row += 1

    def close_file(self):
        # It will close and save the Excel file in the project directory
        self.wb.close()
/logger_single_run.py
import xlsxwriter
from datetime import datetime


class LoggerSR:
    """Excel logger for a single simulation run.

    Writes one row per FEL event (clock, event type, customer id, plus the
    current state/cumulative variables) and a final results table into
    LOG_SR/LOG-SR--<timestamp>.xlsx.
    """

    def __init__(self, s_list):
        self.s_list = s_list  # Setting list of service ServiceStations
        # Importing SystemArrival class. It should be imported inside init
        # to avoid circular imports
        self.system_arrival = __import__('system_arrival').SystemArrival
        time = datetime.now().strftime("%d-%m-%Y--%H-%M-%S")
        # Creating Excel report file in LOG_SR folder in the project directory
        self.wb = xlsxwriter.Workbook('LOG_SR/LOG-SR--' + time + '.xlsx')
        self.ws = self.wb.add_worksheet("all logs")  # Creating a sheet inside the Excel file
        # Creating default format object
        self.default_format = self.wb.add_format(dict(font_name='Century Gothic', align='center', valign='vcenter'))
        # Defining a dictionary so it can be edited easily before creating a new format object
        self.header_format_dict = dict(font_name='Century Gothic', align='center', valign='vcenter', bold=True,
                                       font_color='navy', text_wrap=True, bg_color='silver', border=1)
        # Setting default format and height=14 for first 50 columns in all rows
        self.ws.set_column(0, 50, 14, self.default_format)
        # Freezing first 2 rows and first 3 columns
        self.ws.freeze_panes(2, 3)
        # Writing header for first 3 columns
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        self.ws.merge_range(0, 0, 0, 2, "FEL", format_tmp)  # Writing first row in merged cell
        fel_parameters = ["Clock", "Event Type", "Costumer_ID"]
        for col_num, cell_name in enumerate(fel_parameters):  # Writing second row
            self.ws.write(1, col_num, cell_name, format_tmp)
        # Writing header for columns 4-5
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        format_tmp.set_bg_color('#CCFF99')  # Changing background color of the format object
        self.ws.merge_range(0, 3, 0, 4, "System", format_tmp)  # Writing first row in merged cell
        system_parameters = ["Costumers Total Time", "Costumers Departured"]
        for col_num, cell_name in enumerate(system_parameters):  # Writing second row
            self.ws.write(1, col_num + 3, cell_name, format_tmp)
        # Writing header for columns after 5
        # One section for each ServiceStation. It will cover all ServiceStations automatically.
        color_list = ['#FF5050', '#FFFF99']  # Defining a color list to choose in a loop for each
        # ServiceStation so it can be separated easily
        for num, ss in enumerate(self.s_list):
            format_tmp = self.wb.add_format(self.header_format_dict)
            # Setting background color of the format object used for this
            # ServiceStation's header, from the color list
            format_tmp.set_bg_color(color_list[int(num % len(color_list))])
            # Parameters names list. you need to edit this if you want to change what parameters are printed in log file
            # Also you should change ServiceStation's "return_printables" function
            # Order of parameters in ss_parameters and printables list in ServiceStations should be the same
            ss_parameters = ['Available Servers', 'Busy Servers', 'Queue Len', 'Rest in Waiting',
                             'Cumulative Queue Len', 'Max Queue Len', 'Total Service Time', 'Total Service Count',
                             'Queue Delay Cumulative', 'Queue Total Time', 'Servers Total Busy Time',
                             'Servers Total Available Time']
            i = num * len(ss_parameters) + 5  # Choose starting column
            self.ws.merge_range(0, i, 0, i + len(ss_parameters) - 1, ss.name, format_tmp)  # Writing first row in
            # merged cell
            for index, cell_name in enumerate(ss_parameters):  # Writing second row
                self.ws.write(1, index + i, cell_name, format_tmp)
        self.row = 2  # Setting the starting row to write logs. 3rd row is the row after header.

    def fel_logger(self, event_notice):
        # It will write the event notice passed, into in the next blank row
        # (the last element of event_notice is deliberately skipped).
        for col_num, item in enumerate(event_notice[0: -1]):
            self.ws.write(self.row, col_num, item)
        self.variable_logger()  # Calling variable_logger function to log cumulative and state variables
        self.row += 1  # Moving to next row

    def variable_logger(self):
        # It will log cumulative and state variables for SystemArrivals and ServiceStations in
        # columns after 3 (where fel ends)
        column = 3
        # Writing System variables (class-level attributes of SystemArrival).
        self.ws.write(self.row, column, self.system_arrival.costumers_total_time)
        column += 1
        self.ws.write(self.row, column, self.system_arrival.costumers_departured)
        column += 1
        # Writing ServiceStation variables; order must match the header
        # written in __init__ (see return_printables).
        for ss in self.s_list:
            for item in ss.return_printables():
                self.ws.write(self.row, column, item)
                column += 1

    def result_logger(self):
        # It will write the system evaluation parameters in a table at the end of the log file
        self.row += 3  # The table starts 3 rows after where log table ends
        column = 4  # The table starts from 5th column
        format_tmp = self.wb.add_format(self.header_format_dict)  # Creating a temporary format object
        format_tmp.set_bg_color('#29A8FF')  # Changing it's background color to blue
        # Writing the header:
        self.ws.write(self.row, column, 'Scope', format_tmp)
        self.ws.merge_range(self.row, column + 1, self.row, column + 2, "Parameter", format_tmp)
        self.ws.write(self.row, column + 3, 'Value', format_tmp)
        self.row += 1
        color_list = ['#FF5050', '#FFFF99']  # Used to separate parts with two colors in loop
        for num, ss in enumerate(self.s_list):  # Writing ServiceStations evaluation parameters
            format_tmp = self.wb.add_format(self.header_format_dict)
            format_tmp.set_bg_color(color_list[int(num % len(color_list))])
            # ss.result is calculated in final_calculations method in
            # ServiceStations at the end of simulation
            result = ss.result
            self.ws.merge_range(self.row, column, self.row + len(result) - 1, column, ss.name, format_tmp)
            for key, value in result.items():  # Writing parameters name and value
                self.ws.merge_range(self.row, column + 1, self.row, column + 2, key, format_tmp)
                self.ws.write(self.row, column + 3, value, format_tmp)
                self.row += 1
        # Writing ServiceStations evaluation parameters:
        # SystemArrival.result is calculated in final_calculations method in
        # SystemArrival at the end of simulation
        result = self.system_arrival.result
        format_tmp = self.wb.add_format(self.header_format_dict)
        self.ws.write(self.row, column, "System", format_tmp)  # Writing the scope column
        for key, value in result.items():  # Writing parameters name and value
            self.ws.merge_range(self.row, column + 1, self.row, column + 2, key, format_tmp)
            self.ws.write(self.row, column + 3, value, format_tmp)
            self.row += 1

    def close_file(self):
        # It will close and save the Excel file in the project directory
        self.wb.close()
/main_multi_run.py
import sys
import importlib
from service_station import ServiceStation
from system_arrival import SystemArrival
from movement import Movement
from time_generator import TimeGenerator
from number_generator import NumberGenerator
import time_management
from logger_multi_run import LoggerMR

# Multi-replication driver: runs the same simulation `replications` times,
# accumulates each scope's evaluation parameters, and prints/logs the averages.
replications = 100
result = []  # Accumulated result dicts: one per ServiceStation, plus one for the System (appended last)
ss_names = ['ss1', 'ss2', 'ss3']
logger = LoggerMR(ss_names, replications)
i = 0
while i < replications:
    # Reload the stateful modules so every replication starts from a clean
    # FEL, clock, and class-level registries/counters.
    importlib.reload(sys.modules['service_station'])
    importlib.reload(sys.modules['system_arrival'])
    importlib.reload(sys.modules['movement'])
    importlib.reload(sys.modules['time_management'])
    from service_station import ServiceStation
    from system_arrival import SystemArrival
    from movement import Movement
    # This is our simulation's main file.
    # Here we import classes and functions from other project files.
    # Then we need to make objects from our classes and set attributes.
    # These objects are used to setup the system in a modular way.
    # You can make as many service stations as you need with their own attributes,
    # then arrange the whole system.
    # ---------------------------------------------------------------------
    # Creating SystemArrival objects
    # ---------------------------------------------------------------------
    # -------- First SystemArrival object --------
    t_generator = TimeGenerator.Exponential(3)  # Creating its TimeGenerator
    n_generator = NumberGenerator.Static(1)  # Creating its NumberGenerator
    ief1 = SystemArrival("ief1", t_generator, n_generator)  # Creating first SystemArrival object
    del n_generator, t_generator
    # -------- Second SystemArrival object --------
    t_generator = TimeGenerator.Exponential(5)  # Creating its TimeGenerator
    n_generator = NumberGenerator.Discrete((1, 2, 3, 4), (0.2, 0.3, 0.3, 0.2))  # Creating its NumberGenerator
    ief2 = SystemArrival("ief2", t_generator, n_generator)  # Creating second SystemArrival object
    del n_generator, t_generator
    # -------- Third SystemArrival object --------
    t_generator = TimeGenerator.Uniform(0, 120)  # Creating its TimeGenerator
    n_generator = NumberGenerator.Poisson(30)  # Creating its NumberGenerator
    ief3 = SystemArrival("ief3", t_generator, n_generator)  # Creating third SystemArrival object
    del n_generator, t_generator
    # ---------------------------------------------------------------------
    # Creating ServiceStation objects
    # ---------------------------------------------------------------------
    # -------- First ServiceStation object --------
    t_generator = TimeGenerator.DoubleTriangular(1, 2, 4, 1, 2, 3)  # Creating its TimeGenerator
    ss1 = ServiceStation("ss1", t_generator, 5)  # Creating first ServiceStation object
    del t_generator
    # -------- Second ServiceStation object --------
    t_generator = TimeGenerator.Uniform(0.5, 2)  # Creating its TimeGenerator
    ss2 = ServiceStation("ss2", t_generator, 2)  # Creating second ServiceStation object
    del t_generator
    # -------- Third ServiceStation object --------
    t_generator = TimeGenerator.Triangular(10, 20, 30)  # Creating its TimeGenerator
    ss3 = ServiceStation("ss3", t_generator, 30)  # Creating third ServiceStation object
    del t_generator
    # ---------------------------------------------------------------------
    # Creating Movement objects (one per station-to-station hop, plus the final exit)
    # ---------------------------------------------------------------------
    m1 = Movement(TimeGenerator.Static(0))
    m2 = Movement(TimeGenerator.Exponential(0.5))
    m3 = Movement(TimeGenerator.Exponential(0.5))
    m4 = Movement(TimeGenerator.Exponential(1))
    Movement.check()
    # ---------------------------------------------------------------------
    # Creating Preliminary FEL
    # ---------------------------------------------------------------------
    ief1.set_first_arrival(0)
    ief2.set_first_arrival(0)
    ief3.set_single_arrival(60)
    ss1.set_rest_times([50, 110, 230, 290])
    ss2.set_rest_times([50, 110, 230, 290])
    # ---------------------------------------------------------------------
    # Set Duration
    # ---------------------------------------------------------------------
    es = 300
    time_management.set_end_of_simulation(es)
    # ---------------------------------------------------------------------
    # RUN!
    # ---------------------------------------------------------------------
    try:
        while True:
            time_management.advance_time()
    except time_management.SimulationDone:
        for ss in ServiceStation.list:
            ss.final_calculations()
        SystemArrival.final_calculations()
        logger.replication_logger(ServiceStation.list, SystemArrival)
        i += 1
        print('#' + str(i) + ' : Simulation DONE!')
        if i == 1:
            # First replication: seed the accumulator dicts.
            for ss in ServiceStation.list:
                result.append(ss.result)
            result.append(SystemArrival.result)
        else:
            # Later replications: add this run's values onto the accumulators.
            for j, ss in enumerate(ServiceStation.list):
                for key, value in ss.result.items():
                    result[j][key] += value
            for key, value in SystemArrival.result.items():
                result[-1][key] += value
# Print and log the across-replication averages.
ss_names.append('System')
for num, scope in enumerate(result):
    for key, value in scope.items():
        print("%s: %s = %s" % (ss_names[num], key, round(value / replications, 10)))
logger.result_logger(ss_names, result)
logger.close_file()
/main_single_run.py
from service_station import ServiceStation
from system_arrival import SystemArrival
from movement import Movement
from time_generator import TimeGenerator
from number_generator import NumberGenerator
import time_management
from logger_single_run import LoggerSR

# This is our simulation's main file.
# Here we import classes and functions from other project files.
# Then we need to make objects from our classes and set attributes.
# These objects are used to setup the system in a modular way.
# You can make as many service stations as you need with their own attributes,
# then arrange the whole system together.

# ---------------------------------------------------------------------
# Creating SystemArrival objects
# ---------------------------------------------------------------------
# -------- First SystemArrival object --------
t_generator = TimeGenerator.Exponential(3)  # Creating its TimeGenerator
n_generator = NumberGenerator.Static(1)  # Creating its NumberGenerator
ief1 = SystemArrival("ief1", t_generator, n_generator)  # Creating first SystemArrival object
del n_generator, t_generator
# -------- Second SystemArrival object --------
t_generator = TimeGenerator.Exponential(5)  # Creating its TimeGenerator
n_generator = NumberGenerator.Discrete((1, 2, 3, 4), (0.2, 0.3, 0.3, 0.2))  # Creating its NumberGenerator
ief2 = SystemArrival("ief2", t_generator, n_generator)  # Creating second SystemArrival object
del n_generator, t_generator
# -------- Third SystemArrival object --------
t_generator = TimeGenerator.Uniform(0, 120)  # Creating its TimeGenerator
n_generator = NumberGenerator.Poisson(30)  # Creating its NumberGenerator
ief3 = SystemArrival("ief3", t_generator, n_generator)  # Creating third SystemArrival object
del n_generator, t_generator
# ---------------------------------------------------------------------
# Creating ServiceStation objects
# ---------------------------------------------------------------------
# -------- First ServiceStation object --------
t_generator = TimeGenerator.DoubleTriangular(1, 2, 4, 1, 2, 3)  # Creating its TimeGenerator
ss1 = ServiceStation("ss1", t_generator, 5)  # Creating first ServiceStation object
del t_generator
# -------- Second ServiceStation object --------
t_generator = TimeGenerator.Uniform(0.5, 2)  # Creating its TimeGenerator
ss2 = ServiceStation("ss2", t_generator, 2)  # Creating second ServiceStation object
del t_generator
# -------- Third ServiceStation object --------
t_generator = TimeGenerator.Triangular(10, 20, 30)  # Creating its TimeGenerator
ss3 = ServiceStation("ss3", t_generator, 30)  # Creating third ServiceStation object
del t_generator
# ---------------------------------------------------------------------
# Creating Movement objects (one per station-to-station hop, plus the final exit)
# ---------------------------------------------------------------------
m1 = Movement(TimeGenerator.Static(0))
m2 = Movement(TimeGenerator.Exponential(0.5))
m3 = Movement(TimeGenerator.Exponential(0.5))
m4 = Movement(TimeGenerator.Exponential(1))
Movement.check()
# ---------------------------------------------------------------------
# Creating Loggers
# ---------------------------------------------------------------------
# time_management.logger_set_list(ServiceStation.list)
logger = LoggerSR(ServiceStation.list)
# ---------------------------------------------------------------------
# Creating Preliminary FEL
# ---------------------------------------------------------------------
ief1.set_first_arrival(0)
ief2.set_first_arrival(0)
ief3.set_single_arrival(60)
ss1.set_rest_times([50, 110, 230, 290])
ss2.set_rest_times([50, 110, 230, 290])
# ---------------------------------------------------------------------
# Set Duration
# ---------------------------------------------------------------------
es = 300
time_management.set_end_of_simulation(es)
# ---------------------------------------------------------------------
# RUN!
# ---------------------------------------------------------------------
try:
    while True:
        # Handle the next FEL event and log the returned event notice.
        logger.fel_logger(time_management.advance_time())
except time_management.SimulationDone:
    for ss in ServiceStation.list:
        ss.final_calculations()
    SystemArrival.final_calculations()
    logger.fel_logger((es, "ES", 0))  # Log the end-of-simulation event itself
    logger.result_logger()
    print("Simulation DONE!")
    logger.close_file()
/movement.py
import time_management from time_management import add_to_fel from system_arrival import SystemArrival ss_list = __import__('service_station').ServiceStation.list class Movement(): list = [] @classmethod def check(cls): x = len(ss_list) if len(cls.list) == x + 1: return elif len(cls.list) < x + 1: raise ValueError("Movement objects should be more") else: raise ValueError("Movement objects are more than needed") def __init__(self, moving_time_generator): self.time_generator = moving_time_generator self.position = len(Movement.list) + 1 self.name = "m" + str(self.position) Movement.list.append(self) # Overriding Python's original __repr__ function def __repr__(self): return self.name def move(self, costumer_id): if self.position <= len(ss_list): event_notice = ( self.time_generator.generate() + time_management.clock, "A" + str(self.position), costumer_id, ss_list[self.position - 1].arrival) add_to_fel(event_notice) else: event_notice = (self.time_generator.generate() + time_management.clock, "D", costumer_id, SystemArrival.departure) add_to_fel(event_notice)
/number_generator.py
import random from math import exp class NumberGenerator: class Discrete(random.Random): def __init__(self, x: tuple, fx: tuple, **kwargs): self.x = None self.fx_list = fx self.x_list = x if len(self.x_list) != len(self.fx_list): raise ValueError("x_list and fx_list should have same number of elements") for key, value in kwargs.items(): if key == "seed": setattr(self, "x", value) super().__init__(self.x) def generate(self): rnd = self.random() for i in range(self.fx_list.__len__()): if rnd < sum(self.fx_list[:i + 1]): return self.x_list[i] class Static: def __init__(self, x=0): self.x = x def generate(self): return self.x class Poisson(random.Random): def __init__(self, mean=1, **kwargs): self.x = None self.mean = mean self.e = exp(-1 * mean) for key, value in kwargs.items(): if key == "seed": setattr(self, "x", value) super().__init__(self.x) def generate(self): n = -1 p = 1 while p > self.e: p = p * self.random() n += 1 return n
/service_station.py
import time_management
from time_management import add_to_fel, postponed_rest_log_editor

# ----------------------------------------------------------------
# Creating class ServiceStation
# Our service stations are objects of this class.
# Costumer arrivals, departures, servers leaving for rest and getting back to
# work are handled here.
# ----------------------------------------------------------------
# Event notices created here are as follows:
# station departure: (time, Di, costumer_id, method)
# server rest: (time, Ri, method)
# server back: (time, Bi, method)
# NOTE(review): postponed_rest_log_editor is imported from time_management but
# is not defined in the version of that module visible here -- confirm it exists.


class ServiceStation:
    # Registry of every ServiceStation created, in creation order.
    list = []

    def __init__(self, name, service_time_generator, num_of_servers):
        """Create and register a station with `num_of_servers` servers.

        service_time_generator: object with generate() returning a service time.
        """
        self.name = name  # What you call this station in the real world
        self.service_time_generator = service_time_generator  # An object of the TimeGenerator classes
        self.num_of_servers = num_of_servers  # Number of servers working in this ServiceStation
        self.available_servers = num_of_servers
        self.busy_servers = 0  # Number of busy servers at the beginning of the simulation. Usually 0.
        self.queue_list = []  # Costumers waiting in queue; elements: (queue_joined_time, costumer_id)
        self.rest_in_waiting = 0  # 1 while a server must finish its current serve before going to rest
        self.server_rest_duration = 10  # How long each server's rest lasts
        self.position = len(ServiceStation.list) + 1  # 1-based index of this station
        ServiceStation.list.append(self)
        self.m_list = __import__('movement').Movement.list  # Lazy import avoids a circular import
        self.result = {}  # Evaluation parameters, filled by final_calculations()
        # --------------------------------------------------------
        # Variables to measure system evaluation parameters:
        self.q_len_cumulative = 0  # Time-weighted sum of queue length
        self.q_len_last_clock = 0  # Last clock at which queue-length stats were updated
        self.q_len_max = 0
        # ---
        self.service_total_time = 0
        self.service_total_count = 0
        # ---
        self.servers_total_busy_t = 0  # Sum of busy servers * time over periods
        self.servers_busy_last_clock = 0  # Last time the busy-servers count changed
        self.servers_total_available_t = 0  # Sum of available servers * time over periods
        self.servers_available_last_clock = 0  # Last time the available-servers count changed
        # ---
        self.queue_delay_cumulative = 0  # Total time costumers waited in queue
        # ---
        # TODO edit this
        self.queue_total_time = 0  # Total time the queue was non-empty

    def __repr__(self):
        # Show the station's real-world name instead of the default repr.
        return self.name

    def return_printables(self):
        """Return state/cumulative variables in the column order LoggerSR expects."""
        return ([self.available_servers, self.busy_servers, len(self.queue_list), self.rest_in_waiting,
                 self.q_len_cumulative, self.q_len_max, self.service_total_time, self.service_total_count,
                 self.queue_delay_cumulative, self.queue_total_time, self.servers_total_busy_t,
                 self.servers_total_available_t])

    def arrival(self, costumer_id):
        """Handle a costumer arriving at this station: serve now or enqueue."""
        if self.busy_servers < self.available_servers:  # No waiting in queue
            # Close out the busy-server-time period that just ended.
            self.servers_total_busy_t += self.busy_servers * (time_management.clock - self.servers_busy_last_clock)
            self.servers_busy_last_clock = time_management.clock
            self.busy_servers += 1
            event_duration = self.service_time_generator.generate()
            event_notice = (
                event_duration + time_management.clock, "D" + str(self.position), costumer_id, self.departure)
            add_to_fel(event_notice)  # Generating the departure event for this costumer.
            self.service_total_time += event_duration
            self.service_total_count += 1
        else:  # Waiting in queue
            # Close out the queue-length period that just ended, then enqueue.
            self.q_len_cumulative += len(self.queue_list) * (time_management.clock - self.q_len_last_clock)
            self.queue_total_time += int(bool(len(self.queue_list))) * (time_management.clock - self.q_len_last_clock)
            self.q_len_last_clock = time_management.clock
            self.queue_list.append((time_management.clock, costumer_id))  # Adding costumer to queue
            if len(self.queue_list) > self.q_len_max:
                self.q_len_max = len(self.queue_list)

    def departure(self, costumer_id):
        """Handle a service completion; then move the costumer onward.

        A departure happens when service ends for one costumer.
        """
        if not self.rest_in_waiting:  # If there is no server waiting to get rest.
            if self.queue_list.__len__() > 0:
                # The freed server immediately takes the first costumer in queue.
                event_duration = self.service_time_generator.generate()
                event_notice = (
                    event_duration + time_management.clock, "D" + str(self.position), self.queue_list[0][1],
                    self.departure)
                add_to_fel(event_notice)  # Generating departure event for next costumer waiting in queue.
                self.service_total_time += event_duration
                self.service_total_count += 1
                self.q_len_cumulative += len(self.queue_list) * (time_management.clock - self.q_len_last_clock)
                self.queue_total_time += int(bool(len(self.queue_list))) * (
                        time_management.clock - self.q_len_last_clock)
                self.q_len_last_clock = time_management.clock
                self.queue_delay_cumulative += time_management.clock - self.queue_list[0][0]
                del self.queue_list[0]  # Deleting the costumer which starts getting service, from queue.
            else:
                # Queue empty: the server simply becomes idle.
                self.servers_total_busy_t += self.busy_servers * (time_management.clock - self.servers_busy_last_clock)
                self.servers_busy_last_clock = time_management.clock
                self.busy_servers -= 1
        else:  # If there is a server waiting to get rest
            self.servers_total_busy_t += self.busy_servers * (time_management.clock - self.servers_busy_last_clock)
            self.servers_busy_last_clock = time_management.clock
            self.busy_servers -= 1  # The server is no longer busy
            self.rest_in_waiting = 0  # so there is no busy server waiting to get rest
            event_notice = (time_management.clock, "R" + str(self.position), self.server_rest)
            add_to_fel(event_notice)  # Generating the new server rest event notice.
            # Adding this new event notice to the FEL is necessary for FEL logging.
        self.m_list[self.position].move(costumer_id)  # Move the departing costumer to the next part of the system

    def server_rest(self, *args):
        """Handle a server leaving for rest; rest events are pre-seeded into the FEL."""
        if self.busy_servers < self.available_servers:
            # An idle server can leave immediately.
            self.servers_total_available_t += self.available_servers * (time_management.clock -
                                                                        self.servers_available_last_clock)
            self.servers_available_last_clock = time_management.clock
            self.available_servers -= 1
            event_notice = (self.server_rest_duration + time_management.clock, "B" + str(self.position),
                            self.server_back)
            add_to_fel(event_notice)  # Server comes back after server_rest_duration (10 by default).
        else:
            # All servers busy: postpone the rest until the next departure.
            self.rest_in_waiting = 1  # It's used in the departure() method.
            postponed_rest_log_editor()  # presumably fixes the FEL log entry for the postponed rest -- TODO confirm

    def server_back(self, *args):
        """Handle a server returning from rest; it serves the next queued costumer, if any."""
        self.servers_total_available_t += self.available_servers * (time_management.clock -
                                                                    self.servers_available_last_clock)
        self.servers_available_last_clock = time_management.clock
        self.available_servers += 1
        if self.queue_list.__len__() > 0:
            self.servers_total_busy_t += self.busy_servers * (time_management.clock - self.servers_busy_last_clock)
            self.servers_busy_last_clock = time_management.clock
            self.busy_servers += 1
            event_duration = self.service_time_generator.generate()
            event_notice = (
                event_duration + time_management.clock, "D" + str(self.position), self.queue_list[0][1],
                self.departure)
            add_to_fel(event_notice)  # Generating departure event for next costumer waiting in queue.
            self.service_total_time += event_duration
            self.service_total_count += 1
            self.q_len_cumulative += len(self.queue_list) * (time_management.clock - self.q_len_last_clock)
            self.queue_total_time += int(bool(len(self.queue_list))) * (time_management.clock - self.q_len_last_clock)
            self.q_len_last_clock = time_management.clock
            self.queue_delay_cumulative += time_management.clock - self.queue_list[0][0]
            del self.queue_list[0]  # Deleting the costumer which starts getting service, from queue.

    def set_rest_times(self, rest_times_list):
        """Seed one server-rest event into the FEL for every clock in `rest_times_list`."""
        for t in rest_times_list:
            event_notice = (t, "R" + str(self.position), self.server_rest)
            add_to_fel(event_notice)

    def final_calculations(self):
        """Close all open accumulation periods and compute the evaluation parameters into self.result."""
        self.q_len_cumulative += len(self.queue_list) * (time_management.clock - self.q_len_last_clock)
        self.queue_total_time += int(bool(len(self.queue_list))) * (time_management.clock - self.q_len_last_clock)
        self.servers_total_busy_t += self.busy_servers * (time_management.clock - self.servers_busy_last_clock)
        self.servers_total_available_t += self.available_servers * (
                time_management.clock - self.servers_available_last_clock)
        self.result = dict(
            total_wait_time=(self.service_total_time + self.queue_delay_cumulative) / self.service_total_count,
            average_queue_delay=self.queue_delay_cumulative / self.service_total_count,
            average_queue_length=self.q_len_cumulative / time_management.clock,
            maximum_queue_length=self.q_len_max,
            servers_efficiency=self.servers_total_busy_t / self.servers_total_available_t,
            queue_busy_percentage=self.queue_total_time / time_management.clock
        )
/system_arrival.py
import time_management
from time_management import add_to_fel

__id__ = 10000  # Module-level counter used to hand out unique costumer ids.


# TODO new arrivals in fel dont have id?!?
def id_generator():
    """Return the next unique costumer id (monotonically increasing ints)."""
    global __id__
    __id__ += 1
    return __id__


class SystemArrival:
    """An arrival source that feeds costumers into the system.

    Instance objects model one arrival stream each; the class itself also
    tracks system-wide departure statistics (class attributes below).
    """

    list = []  # Registry of every SystemArrival created.
    costumers_inside_dict = {}  # costumer_id -> clock at which it entered the system
    costumers_departured = 0  # Count of costumers that left the system
    costumers_total_time = 0  # Sum of each departed costumer's time in system
    result = {}  # Evaluation parameters, filled by final_calculations()

    @classmethod
    def departure(cls, costumer_id):
        """Record a costumer leaving the whole system."""
        cls.costumers_departured += 1
        cls.costumers_total_time += time_management.clock - cls.costumers_inside_dict[costumer_id]
        cls.costumers_inside_dict.pop(costumer_id)

    def __init__(self, name, inter_arrival_time_generator, number_of_arrivals_generator):
        """Create and register an arrival stream.

        inter_arrival_time_generator: generate() -> time between arrivals.
        number_of_arrivals_generator: generate() -> batch size per arrival.
        """
        self.name = name
        self.time_generator = inter_arrival_time_generator
        self.number_generator = number_of_arrivals_generator
        SystemArrival.list.append(self)
        self.m_list = __import__('movement').Movement.list  # Lazy import avoids a circular import

    def __repr__(self):
        # Show the stream's name instead of the default repr.
        return self.name

    def set_first_arrival(self, beginning_time):
        """Seed the FEL with the first (self-rescheduling) arrival of this stream."""
        event_notice = (
            self.time_generator.generate() + beginning_time, self.name, self.number_generator.generate(),
            self.new_arrival)
        add_to_fel(event_notice)

    def new_arrival(self, number_of_arrivals):
        """Inject a batch of costumers, then schedule the stream's next arrival."""
        for i in range(number_of_arrivals):
            id_tmp = id_generator()
            SystemArrival.costumers_inside_dict[id_tmp] = time_management.clock
            self.m_list[0].move(id_tmp)  # First movement routes into the first station
        # Generating the next arrival event.
        event_notice = (
            self.time_generator.generate() + time_management.clock, self.name, self.number_generator.generate(),
            self.new_arrival)
        add_to_fel(event_notice)

    def set_single_arrival(self, beginning_time):
        """Seed one non-repeating arrival (no follow-up event is scheduled)."""
        event_notice = (
            self.time_generator.generate() + beginning_time, self.name, self.number_generator.generate(),
            self.new_single_arrival)
        add_to_fel(event_notice)

    def new_single_arrival(self, number_of_arrivals):
        """Inject a batch of costumers without rescheduling another arrival."""
        for i in range(number_of_arrivals):
            id_tmp = id_generator()
            SystemArrival.costumers_inside_dict[id_tmp] = time_management.clock
            self.m_list[0].move(id_tmp)

    @classmethod
    def final_calculations(cls):
        """Compute the system-wide evaluation parameters into cls.result."""
        cls.result = dict(average_time_in_system=cls.costumers_total_time / cls.costumers_departured)
/time_generator.py
"""
Random time generators to be used for inter arrival time or activity time in simulation models.
"""
import random
from math import sqrt, log


class TimeGenerator:
    """Namespace of random duration generators; each exposes ``generate() -> float``.

    Generators derived from ``random.Random`` accept an optional ``seed=...``
    keyword for reproducible streams. Results are rounded to 3 decimals.
    """

    class Uniform(random.Random):
        """Uniform duration on [lower_limit, upper_limit)."""

        def __init__(self, lower_limit=0, upper_limit=1, **kwargs):
            self.x = None  # Optional seed; stays None for nondeterministic seeding.
            self.lower_limit = lower_limit
            self.upper_limit = upper_limit
            for key, value in kwargs.items():
                if key == "seed":
                    setattr(self, "x", value)
            super().__init__(self.x)

        def generate(self):
            return round(self.random() * (self.upper_limit - self.lower_limit) + self.lower_limit, 3)

    class Static:
        """Degenerate generator: always returns the fixed value ``x``."""

        def __init__(self, x=0):
            self.x = x

        def generate(self):
            return self.x

    class Exponential(random.Random):
        """Exponential duration with the given ``mean`` (rate = 1/mean)."""

        def __init__(self, mean=1, **kwargs):
            self.x = None  # Optional seed; stays None for nondeterministic seeding.
            self.rate = 1 / mean
            for key, value in kwargs.items():
                if key == "seed":
                    setattr(self, "x", value)
            super().__init__(self.x)

        def generate(self):
            rnd = self.random()
            # Fix: random() yields values in [0, 1), so the original log(rnd)
            # could raise "math domain error" on rnd == 0.0. Using 1 - rnd
            # keeps the argument in (0, 1] (same distribution); CPython's
            # random.expovariate uses the same transform.
            return round(-1 / self.rate * log(1 - rnd), 3)

    class Triangular(random.Random):
        """Triangular duration on [lower_limit, upper_limit] with the given mode."""

        def __init__(self, lower_limit=0, mode=.5, upper_limit=1, **kwargs):
            self.x = None  # Optional seed; stays None for nondeterministic seeding.
            self.a = lower_limit
            self.b = upper_limit
            self.c = mode
            self.Fc = (self.c - self.a) / (self.b - self.a)  # CDF value at the mode.
            for key, value in kwargs.items():
                if key == "seed":
                    setattr(self, "x", value)
            super().__init__(self.x)

        def generate(self):
            # Inverse-transform sampling, split at the mode.
            rnd = self.random()
            if rnd < self.Fc:
                return round(self.a + sqrt(rnd * (self.b - self.a) * (self.c - self.a)), 3)
            return round(self.b - sqrt((1 - rnd) * (self.b - self.a) * (self.b - self.c)), 3)

    class DoubleTriangular(random.Random):
        """Sum of two independent triangular durations drawn from one shared stream."""

        def __init__(self, lower_limit_1=0, mode_1=0.5, upper_limit_1=1, lower_limit_2=0, mode_2=.5, upper_limit_2=1,
                     **kwargs):
            self.x = None  # Optional seed; stays None for nondeterministic seeding.
            self.a1 = lower_limit_1
            self.b1 = upper_limit_1
            self.c1 = mode_1
            self.a2 = lower_limit_2
            self.b2 = upper_limit_2
            self.c2 = mode_2
            self.Fc1 = (self.c1 - self.a1) / (self.b1 - self.a1)  # CDF value at mode 1.
            self.Fc2 = (self.c2 - self.a2) / (self.b2 - self.a2)  # CDF value at mode 2.
            for key, value in kwargs.items():
                if key == "seed":
                    setattr(self, "x", value)
            super().__init__(self.x)

        def generate(self):
            rnd1 = self.random()
            rnd2 = self.random()
            if rnd1 < self.Fc1:
                t1 = round(self.a1 + sqrt(rnd1 * (self.b1 - self.a1) * (self.c1 - self.a1)), 3)
            else:
                t1 = round(self.b1 - sqrt((1 - rnd1) * (self.b1 - self.a1) * (self.b1 - self.c1)), 3)
            if rnd2 < self.Fc2:
                t2 = round(self.a2 + sqrt(rnd2 * (self.b2 - self.a2) * (self.c2 - self.a2)), 3)
            else:
                t2 = round(self.b2 - sqrt((1 - rnd2) * (self.b2 - self.a2) * (self.b2 - self.c2)), 3)
            return t1 + t2

    class DT:
        """Sum of two caller-supplied Triangular generator objects (independent streams)."""

        def __init__(self, triangular_obj_1, triangular_obj_2):
            self.t1 = triangular_obj_1
            self.t2 = triangular_obj_2

        def generate(self):
            return self.t1.generate() + self.t2.generate()
/time_management.py
import bisect

# ----------------------------------------------------------------------------
# Simulation engine: the Future Event List (FEL) and the global clock live
# here, together with the helpers that schedule and dispatch event notices.
# ----------------------------------------------------------------------------

fel = []  # Future Event List, kept sorted by event time (first tuple element).
clock = 0  # Current simulation time; advanced as events are dispatched.


def add_to_fel(event_notice: tuple):
    """Insert *event_notice* into the FEL, keeping it ordered by event time."""
    try:
        # bisect keeps a sorted list sorted with an efficient O(log n) search.
        bisect.insort_left(fel, event_notice)
    except TypeError:
        # Two notices can compare equal on every element up to their handler
        # callables, which cannot be ordered; fall back to append + re-sort
        # keyed on time only. In practice such collisions are extremely rare.
        fel.append(event_notice)
        fel.sort(key=lambda notice: notice[0])


class SimulationDone(Exception):
    """Raised when the end-of-simulation event is dispatched."""
    pass


def es(*args):
    """End-of-Simulation handler: always raises SimulationDone."""
    raise SimulationDone


def set_end_of_simulation(es_time):
    """Schedule the end-of-simulation ("es") event at clock == *es_time*."""
    add_to_fel((es_time, es))


def advance_time():
    """Dispatch the earliest FEL event and advance the clock.

    The notice is removed *before* its handler runs so the handler may
    schedule new events at the current clock (e.g. zero-length movements).
    Returns the handled notice so the caller can log it.
    """
    global clock
    notice = fel.pop(0)
    clock = notice[0]  # Jump the clock to this event's time.
    notice[-1](notice[-2])  # Call handler(payload); payload is usually the costumer id.
    return notice
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
leonsooi/ngskintoolsplus
refs/heads/master
{"/ngSkinToolsPlus/ngSkinToolsPlus/utilities/copyLayers.py": ["/ngSkinToolsPlus/ngSkinToolsPlus/utilities/influenceAssociation.py"]}
└── └── ngSkinToolsPlus └── ngSkinToolsPlus ├── lib │ └── weights.py ├── misc │ └── tests.py └── utilities ├── __init__.py ├── copyLayers.py ├── influenceAssociation.py └── surfaceAssociation.py
/ngSkinToolsPlus/ngSkinToolsPlus/lib/weights.py
'''
Created on Mar 25, 2014

@author: Leon

Helpers for editing ngSkinTools layer weights/masks inside Maya.
'''
import maya.cmds as mc
import pymel.core as pm
import utils.rigging as rt

mel = pm.language.Mel()


def smoothLayerMask(mll, layerId, intensity=1.0):
    '''
    Flood-smooth the paint mask of *layerId* using ngSkinTools' paint flood.

    mll: ngSkinTools MllInterface for the target mesh.
    intensity: total smooth amount; values > 1.0 are applied as repeated
    full-strength floods plus one final partial flood.
    '''
    mll.setCurrentLayer(layerId)
    mll.ngSkinLayerCmd(cpt='mask')
    # if intensity is larger than 1.0, use multiple iterations
    while intensity > 1.0:
        mll.ngSkinLayerCmd(paintOperation=4, paintIntensity=1.0)
        mll.ngSkinLayerCmd(paintFlood=True)
        intensity = intensity - 1.0
    mll.ngSkinLayerCmd(paintOperation=4, paintIntensity=intensity)
    mll.ngSkinLayerCmd(paintFlood=True)


def relaxLayerWeights(mll, layerId, expand=True, relaxSteps=33, relaxSize=0.1708, relaxAllAmount=0.5):
    '''
    relax all weights on layer
    verts closest to joints will be masked (excluded from the first relax pass)
    expand will expand the mask to the neighbouring vertices as well
    '''
    infIter = mll.listLayerInfluences(2)
    infs = list(infIter)
    mesh = mll.getTargetInfo()[0]
    mesh = pm.PyNode(mesh)
    # get vert closest to joints
    vertIds = []
    for inf, infId in infs:
        pos = pm.PyNode(inf).getTranslation(space='world')
        faceId = mesh.getClosestPoint(pos, space='world')[1]
        faceVertIds = mesh.f[faceId].getVertices()
        # Of the face's vertices, keep the one nearest the joint position.
        closestVertId = min(faceVertIds, key=lambda vtxId: (mesh.vtx[vtxId].getPosition() - pos).length())
        closestVertIds = [closestVertId]
        # expand ids if needed
        if expand:
            connectedVertices = mesh.vtx[closestVertId].connectedVertices()
            closestVertIds += [vtx.index() for vtx in connectedVertices]
        vertIds += closestVertIds
    vertCount = mll.getVertCount()
    # invert vertIds
    # just use strings since we're going to pass into ngSkinRelax
    vertsToRelax = [mesh.name() + '.vtx[%d]' % id_ for id_ in range(vertCount) if id_ not in vertIds]
    args = {}
    args['numSteps'] = relaxSteps
    args['stepSize'] = relaxSize
    mel.ngSkinRelax(vertsToRelax, **args)
    # run another relax to clean up any artefacts
    # relax all
    args = {}
    args['numSteps'] = int(relaxSteps * relaxAllAmount)
    args['stepSize'] = relaxSize * relaxAllAmount
    mel.ngSkinRelax(mesh.name(), **args)


def createWeightsListByPolyStrip(outerXfos, innerXfos, mesh, loops=0):
    '''
    returns weights as a float list
    innerVerts will be weighted to 1
    loops [int]: number of loops the polyStrip will have
    0 means an instant falloff from inner to outer
    1 means falloff after 50%, etc...
    '''
    polyStrip, outerVerts, innerVerts = createPolyLoftStrip('temp_weights_', outerXfos, innerXfos, loops)
    # bind polystrip to temporary jnts
    pm.select(cl=True)
    weightBnd = pm.joint(n='temp_weight_bnd')
    pm.select(cl=True)
    noWeightBnd = pm.joint(n='temp_noWeight_bnd')
    sknStrip = pm.skinCluster(weightBnd, noWeightBnd, polyStrip)
    # set weights to polystrip
    sknStrip.setWeights(innerVerts, [0], [1])
    sknStrip.setWeights(outerVerts, [1], [1])
    # create temp mesh to transfer weights to
    tempMesh = pm.duplicate(mesh)[0]
    sknMesh = pm.skinCluster(weightBnd, noWeightBnd, tempMesh)
    pm.copySkinWeights(ss=sknStrip, ds=sknMesh, ia='oneToOne', sa='closestPoint', nm=1)
    # add weights to a float list
    weightsIter = sknMesh.getWeights(tempMesh, 0)
    weightsList = list(weightsIter)
    # cleanup
    pm.delete(polyStrip, tempMesh, weightBnd, noWeightBnd)
    return weightsList


def createPolyLoftStrip(name, outerXfos, innerXfos, loops):
    '''
    name [string]
    takes two lists of transforms (as strings),
    returns poly mesh lofted between the two loops
    also returns two lists of verts (outer and inner)
    '''
    # create curves
    outerCrv = rt.makeCrvThroughObjs(outerXfos, 'temp_outer_crv', False, 3)
    mc.closeCurve(outerCrv, preserveShape=0, rpo=True)
    innerCrv = rt.makeCrvThroughObjs(innerXfos, 'temp_inner_crv', False, 3)
    mc.closeCurve(innerCrv, preserveShape=0, rpo=True)
    outerCrv = pm.PyNode(outerCrv)
    innerCrv = pm.PyNode(innerCrv)
    # rebuild crvs so they can be lofted (matching span counts)
    maxSpans = max(outerCrv.numSpans(), innerCrv.numSpans())
    pm.rebuildCurve(outerCrv, rpo=True, spans=maxSpans, kr=2)
    pm.rebuildCurve(innerCrv, rpo=True, spans=maxSpans, kr=2)
    # loft crvs
    polyStrip = pm.loft(outerCrv, innerCrv, d=1, n=name + 'polyStrip_geo', polygon=1, ch=0,
                        sectionSpans=loops + 1)[0]
    # First row of verts comes from the outer curve, last row from the inner.
    outerVerts = polyStrip.vtx[0:maxSpans - 1]
    innerVerts = polyStrip.vtx[maxSpans:]
    # cleanup
    pm.delete(outerCrv, innerCrv)
    return polyStrip, outerVerts, innerVerts
/ngSkinToolsPlus/ngSkinToolsPlus/misc/tests.py
''' Created on Mar 22, 2014 @author: Leon ''' import maya.cmds as mc # assign weights by nearest joint args = {} args['bnj'] = True selJnts = ['joint1', 'joint2'] args['ij'] = '/'.join(selJnts) args['intensity'] = 1.0 # must select mesh or components first # must select layer or mll.setCurrentLayer(layerId) mc.ngAssignWeights(**args) selJnts = [] mc.ngAssignWeights(bnj=True, ij='/'.join(selJnts), intensity=1.0)
/ngSkinToolsPlus/ngSkinToolsPlus/utilities/__init__.py
from ngSkinTools.mllInterface import MllInterface
from ngSkinTools.importExport import LayerData, Layer, Influence
from ngSkinTools.importExport import XmlImporter
import maya.cmds as cmds
import pymel.core as pm

'''
Quick hacks just to get the job done when needed.
Should be modularized properly some time...
'''

'''
USEFUL UTILITIES
'''


def soloLayer(mll, layerId):
    '''
    enables layerId, disables the rest
    '''
    allLayers = mll.listLayers()
    for curLayerId, _layerName in allLayers:
        mll.setLayerEnabled(curLayerId, layerId == curLayerId)


def unifyMask(vertsList, mll, layerId):
    '''
    average mask weights on vertsList
    '''
    maskWts = mll.getLayerMask(layerId)
    vertsNum = len(vertsList)
    vertsWeights = [maskWts[vertId] for vertId in vertsList]
    avgWeight = sum(vertsWeights) / vertsNum
    for vertId in vertsList:
        maskWts[vertId] = avgWeight
    mll.setLayerMask(layerId, maskWts)


def reverseMask(mll, layerId):
    '''
    invert the layer mask (each weight becomes 1 - weight)
    '''
    maskWts = mll.getLayerMask(layerId)
    revMaskWts = [1 - wt for wt in maskWts]
    mll.setLayerMask(layerId, revMaskWts)


'''
utilities to print out layer data (for troubleshooting purposes, etc)
also, if the influence long names have changed we can use this to find
match influences by shortName, and update the longNames
obviously, it assumes that names are unique!
'''


def loadXmlFile(filepath):
    '''
    Load an ngSkinTools XML weights file and return the parsed LayerData.

    # example process for importing XML data
    filepath = r"C:\\Users\\Leon\\Documents\\maya\\projects\\Ori\\scenes\\ori_body_weights_v037.xml"
    xml = loadXmlFile(filepath)
    from ngSkinTools.importExport import XmlImporter
    importer = XmlImporter()
    # load layer data
    data = importer.process(xml)
    '''
    # `with` guarantees the handle is closed even if reading fails
    with open(filepath, 'r') as f:
        contents = f.read()
    importer = XmlImporter()
    data = importer.process(contents)
    return data


def findUnmatchedInfluences(data, printOut=True):
    '''
    parse data and find unmatchable influences

    findUnmatchedInfluences(data)
    '''
    unmatched = []
    for influence in data.getAllInfluences():
        if not pm.objExists(influence):
            unmatched.append(influence.split('|')[-1])
    if printOut:
        # single-argument print() works in both python 2 and 3
        print(unmatched)
    return unmatched


def matchInfluencesByNodeName(data):
    '''
    match influences by nodeName
    assumes all influence names are unique

    matchInfluencesByNodeName(data)
    data.saveTo('GEO:CT_body_geo')
    '''
    for layer in data.layers:
        for influence in layer.influences:
            oldLongName = influence.influenceName
            shortName = oldLongName.split('|')[-1]
            newLongName = cmds.ls(shortName, l=True)[0]
            influence.influenceName = newLongName


def editJson(jsonDict):
    '''
    rename influences in a json layer dict, inserting a "lips_" token
    after the side prefix (CT_/LT_/RT_)
    '''
    for eachLayer in jsonDict["layers"]:
        print(eachLayer["name"])
        for eachInf in eachLayer["influences"]:
            print(eachInf["name"])
            name = eachInf["name"]
            shortName = name.split('|')[-1]
            print(shortName)
            # BUGFIX: the replacements must chain; the original restarted
            # from shortName each time, so only the 'RT_' rename survived.
            replacedName = shortName.replace('CT_', 'CT_lips_')
            replacedName = replacedName.replace('LT_', 'LT_lips_')
            replacedName = replacedName.replace('RT_', 'RT_lips_')
            eachInf["name"] = replacedName


def editLayerData(data):
    '''
    rename influences in a LayerData object, inserting "lips_" after the
    side prefix, then resolve each new short name back to a long name
    '''
    for eachLayer in data.layers:
        print(eachLayer.name)
        for eachInfluence in eachLayer.influences:
            print(eachInfluence.influenceName)
            oldLongName = eachInfluence.influenceName
            oldShortName = oldLongName.split('|')[-1]
            print(oldShortName)
            newShortName = oldShortName.replace('T_', 'T_lips_')
            print(newShortName)
            newLongName = cmds.ls(newShortName, l=True)[0]
            print(newLongName)
            eachInfluence.influenceName = newLongName


def selectInfluencesInLayerData(data):
    '''
    additively select every influence referenced by data
    '''
    cmds.select(cl=True)
    for eachLayer in data.layers:
        print(eachLayer.name)
        for eachInfluence in eachLayer.influences:
            print(eachInfluence.influenceName)
            cmds.select(eachInfluence.influenceName, add=True)


def retModel(jsonDict):
    '''
    build a LayerData model from a json layer dict
    '''
    model = LayerData()
    # dict.has_key() is python-2 only; `in` works everywhere
    if "manualInfluenceOverrides" in jsonDict:
        model.mirrorInfluenceAssociationOverrides = jsonDict['manualInfluenceOverrides']
    for layerData in jsonDict["layers"]:
        layer = Layer()
        model.addLayer(layer)
        layer.enabled = layerData['enabled']
        layer.mask = layerData['mask']
        layer.name = layerData['name']
        layer.opacity = layerData['opacity']
        layer.influences = []
        for influenceData in layerData['influences']:
            influence = Influence()
            layer.addInfluence(influence)
            influence.weights = influenceData['weights']
            influence.logicalIndex = influenceData['index']
            influence.influenceName = influenceData['name']
    return model
/ngSkinToolsPlus/ngSkinToolsPlus/utilities/copyLayers.py
'''
Created on 31/08/2013

@author: Leon
'''
import re

from ngSkinTools.mllInterface import MllInterface
from ngSkinToolsPlus.utilities.influenceAssociation import InfluenceAssociation


class CopyLayers:
    '''
    Copy layers across mesh utility
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.layerIds = []
        # Interfaces are supplied later via setMllInterface(); start unset.
        # (The original assigned the MllInterface *class* here, which was
        # never a usable value, only a placeholder.)
        self.srcMll = None
        self.destMll = None
        self.copyIds = []

    def setMllInterface(self, srcMll, destMll):
        '''Assign the source and destination MllInterface instances.'''
        self.srcMll = srcMll
        self.destMll = destMll

    def copyLayer(self, layerId):
        '''
        copies a single layer from srcMll's mesh to destMll's mesh

        example use:
        testCopy = CopyLayers()
        srcMll = MllInterface()
        srcMll.setCurrentMesh('pPlane1')
        destMll = MllInterface()
        destMll.setCurrentMesh('pPlane3')
        testCopy.setMllInterface(srcMll, destMll)
        testCopy.copyLayer(3)
        '''
        oldName = self.srcMll.getLayerName(layerId)
        newLayer = self.destMll.createLayer(self.createUniqueName(oldName))
        self.destMll.setLayerMask(newLayer, self.srcMll.getLayerMask(layerId))
        # match source influences to destination influences by node name;
        # inactive destination influences are included (activeInfluences=False)
        influenceMatcher = InfluenceAssociation(
            self.srcMll.listLayerInfluences(layerId, True),
            self.destMll.listLayerInfluences(0, False),
            "name")
        for _, influenceIndex in self.srcMll.listLayerInfluences(layerId):
            weights = self.srcMll.getInfluenceWeights(layerId, influenceIndex)
            self.destMll.setInfluenceWeights(newLayer, influenceMatcher[influenceIndex], weights)
        self.copyIds.append(layerId)

    # two procedures copied from DuplicateLayers class
    # for getting a unique name for the new layer
    def createLayerName(self, oldName):
        '''Derive a "copy" name: append " copy", or bump a trailing "(n)" index.'''
        prefix = " copy"
        # copy already? add index
        if oldName.endswith(prefix):
            return oldName + "(2)"
        # indexing exists? increase value
        s = re.search(r'(.*)\((\d+)\)$', oldName)
        if s is not None:
            return s.group(1) + "(%d)" % (int(s.group(2)) + 1,)
        # nothing? just add default copy prefix then
        return oldName + prefix

    def createUniqueName(self, fromName):
        '''Repeat createLayerName until the result is unused on destMll.'''
        layerNames = [l[1] for l in self.destMll.listLayers()]
        result = self.createLayerName(fromName)
        while result in layerNames:
            result = self.createLayerName(result)
        return result
/ngSkinToolsPlus/ngSkinToolsPlus/utilities/influenceAssociation.py
'''
Created on 01/09/2013

@author: Leon
'''


class InfluenceAssociation():
    '''
    Maps influence indices on a source skin layer to matching influence
    indices on a destination layer.
    '''

    def __init__(self, srcInfluences, destInfluences, method):
        '''
        srcInfluences and destInfluences are iterables of
        (influenceName, influenceIndex) pairs, as returned by
        mll.listLayerInfluences(layerId, activeInfluences=False)
        (activeInfluences should be set to False, in case we need to match
        to influences that are currently inactive.)

        method - match by "name"; "label", "closestJoint", "closestBone"
        and "oneToOne" are reserved for future strategies.
        '''
        # {influenceIndex on srcMll : influenceIndex on destMll, ...}
        self.matchDict = {}
        if method == 'name':
            self.matchByName(srcInfluences, destInfluences)

    def matchByName(self, srcInfluences, destInfluences):
        '''
        Populate self.matchDict by pairing influences with equal names.
        Source influences with no same-named destination are left unmapped.
        '''
        srcDict = dict((index, name) for name, index in srcInfluences)
        destDict = dict((name, index) for name, index in destInfluences)
        for srcIndex, name in srcDict.items():
            # membership test directly on the dict (no .keys() needed)
            if name in destDict:
                self.matchDict[srcIndex] = destDict[name]

    def __getitem__(self, srcIndex):
        # raises KeyError for unmatched source indices
        return self.matchDict[srcIndex]
/ngSkinToolsPlus/ngSkinToolsPlus/utilities/surfaceAssociation.py
'''
Created on 01/09/2013

@author: Leon
'''
import pymel.core as pm


class SurfaceAssociation():
    '''
    Builds a vertex correspondence between two meshes.
    '''

    def __init__(self, srcMesh, destMesh, method):
        '''
        srcMesh / destMesh - mesh names (or PyNodes)
        method - association strategy; currently only 'closestComponent'
        '''
        # {vertexId on srcMesh : ((vertexId1 on destMesh, weight),
        #                         (vertexId2 on destMesh, weight), ...), ...}
        self.matchDict = {}
        # normalize both arguments to PyNodes
        srcMesh = pm.PyNode(srcMesh)
        destMesh = pm.PyNode(destMesh)
        if method == 'closestComponent':
            self.matchByClosestComponent(srcMesh, destMesh)

    def matchByClosestComponent(self, srcMesh, destMesh):
        '''
        populate self.matchDict
        '''
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
praneshsaminathan/url_shortener
refs/heads/main
{"/core/serializers.py": ["/core/models.py"], "/core/views.py": ["/core/models.py", "/core/serializers.py"], "/core/urls.py": ["/shorturl/utils/apps.py", "/core/views.py"]}
└── ├── core │ ├── migrations │ │ └── 0001_initial.py │ ├── models.py │ ├── serializers.py │ ├── urls.py │ └── views.py └── shorturl └── utils └── apps.py
/core/migrations/0001_initial.py
# Generated by Django 3.1.7 on 2021-03-01 15:58

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated initial migration for the core app. Do not hand-edit
    # the field definitions here; create a new migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='URLInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_url', models.URLField(help_text='full url', unique=True)),
                ('url_hash', models.URLField(help_text='short url', unique=True)),
                ('clicks', models.PositiveIntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'UrlInfo',
                'verbose_name_plural': 'UrlInfo',
                'db_table': 'urlinfo',
            },
        ),
    ]
/core/models.py
from django.db import models
# ugettext_lazy has been deprecated since Django 3.0 (removed in 4.0);
# gettext_lazy is the drop-in replacement and exists in Django 3.1.
from django.utils.translation import gettext_lazy as _


class URLInfo(models.Model):
    """One shortened URL: the original URL, its short hash and a click counter."""
    full_url = models.URLField(unique=True, null=False, blank=False,
                               help_text=_('full url'))
    # NOTE(review): the hash itself is not a URL -- a CharField would be a
    # better fit, but changing the field type requires a new migration.
    url_hash = models.URLField(unique=True, null=False, blank=False,
                               help_text=_('short url'))
    clicks = models.PositiveIntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return f'{self.url_hash} - {self.full_url} - {self.clicks}'

    class Meta:
        db_table = 'urlinfo'
        verbose_name = _('UrlInfo')
        verbose_name_plural = _('UrlInfo')
/core/serializers.py
# ugettext_lazy has been deprecated since Django 3.0; gettext_lazy is the
# drop-in replacement.
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers

from core.models import URLInfo


class ShorterIRLSerializer(serializers.Serializer):
    """Validates the POST payload for the URL-shortening endpoint."""
    # NOTE(review): the class name looks like a typo for "ShorterURLSerializer";
    # kept unchanged because views.py imports it by this name.
    url = serializers.URLField(max_length=250, min_length=None, allow_blank=False,
                               label=_('URL'), help_text=_('URL'))


# Correctly-spelled, backward-compatible alias; prefer this in new code.
ShorterURLSerializer = ShorterIRLSerializer
/core/urls.py
from django.urls import path, include

from shorturl.utils.apps import get_api_url
from .views import (
    ShorterAPIView,
    GetFullURLAPIView
)

urlpatterns = [
    # POST <name>/<version>/url-shorten/ -> create (or fetch) a short hash
    path(get_api_url(url_name='url-shorten'),
         ShorterAPIView.as_view(),
         name='api-url_shorten'),
    # GET <name>/<version>/full-url/<url_hash>/ -> resolve a hash
    path(get_api_url(url_name='full-url/<str:url_hash>'),
         GetFullURLAPIView.as_view(),
         name='api-full_url'),
]
/core/views.py
from hashlib import md5

from django.shortcuts import render
from rest_framework.permissions import AllowAny
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

from core.serializers import ShorterIRLSerializer
from .models import URLInfo


def _url_info_payload(url_info):
    """Serialize a URLInfo row into the response dict shared by both views."""
    return {
        "full_url": url_info.full_url,
        "hash_url": url_info.url_hash,
        "clicks": url_info.clicks,
    }


class ShorterAPIView(APIView):
    """Create (or return the existing) short hash for a submitted URL."""
    serializer_class = ShorterIRLSerializer
    permission_classes = (AllowAny,)

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        full_url = serializer.validated_data.get('url')
        # get_or_create closes the filter-then-create race of the original
        # (two concurrent POSTs could both pass the filter and both create).
        url_info, _created = URLInfo.objects.get_or_create(
            full_url=full_url,
            defaults={'url_hash': md5(full_url.encode()).hexdigest()[:10]},
        )
        return Response(_url_info_payload(url_info), status=status.HTTP_200_OK)


class GetFullURLAPIView(APIView):
    """Look up the original URL for a hash; 404 when the hash is unknown."""
    permission_classes = (AllowAny,)

    def get(self, request, url_hash, *args, **kwargs):
        url_info = URLInfo.objects.filter(url_hash=url_hash).first()
        if url_info is None:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # NOTE(review): `clicks` is never incremented anywhere in this file --
        # confirm whether this endpoint is supposed to bump the counter.
        return Response(_url_info_payload(url_info), status=status.HTTP_200_OK)
/shorturl/utils/apps.py
from django.conf import settings


def get_api_url(name='api', version=None, app_name='', url_name=''):
    """
    Build an API URL prefix: '<name>/<version>/[<app_name>/][<url_name>/]'.

    version defaults to settings.API_VERSION, resolved at *call* time rather
    than at import time, so importing this module no longer requires Django
    settings to be configured (the original evaluated the default when the
    function was defined).
    """
    if version is None:
        version = settings.API_VERSION
    url = '{0}/{1}/'.format(name, version)
    if app_name and url_name:
        url = '{0}{1}/{2}/'.format(url, app_name, url_name)
    elif app_name and not url_name:
        url = '{0}{1}/'.format(url, app_name)
    elif url_name and not app_name:
        url = '{0}{1}/'.format(url, url_name)
    return url
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
shacharr/roomba_sim
refs/heads/master
{"/roomba_model.py": ["/cleaning_robot_model.py"], "/controller.py": ["/simulator.py"]}
└── ├── arena_model.py ├── arena_view.py ├── cleaning_robot_model.py ├── controller.py ├── helper_functions.py ├── roomba_model.py └── simulator.py
/arena_model.py
import pygame
from helper_functions import *


class RoomModel(object):
    """
    Pixel-grid model of the room.

    The room is rasterized onto a pygame Surface: DIRTY_COLOR marks
    uncleaned floor, CLEAN_COLOR cleaned floor, and DEAD_ZONE_COLOR anything
    outside the room polygon or inside an obstacle.
    """
    DIRTY_COLOR = (0, 255, 0)
    CLEAN_COLOR = (0, 0, 255)
    DEAD_ZONE_COLOR = (0, 0, 0)

    def __init__(self, polygon, obstacles=None):
        """
        polygon   -- list of (x, y) room outline vertices
        obstacles -- optional list of polygons rendered as dead zones
        """
        # avoid the mutable-default-argument pitfall of `obstacles=[]`
        obstacles = [] if obstacles is None else obstacles
        self.polygon = polygon
        self.obstacles = obstacles
        max_x = max([x[0] for x in polygon])
        max_y = max([x[1] for x in polygon])
        self.state = pygame.Surface((max_x, max_y))
        self.state.fill(self.DEAD_ZONE_COLOR)
        pygame.draw.polygon(self.state, self.DIRTY_COLOR, polygon)
        for p in obstacles:
            pygame.draw.polygon(self.state, self.DEAD_ZONE_COLOR, p)
        self.clean_count, self.dirty_count = self.count_clean_dirty(0, 0, max_x, max_y)

    def clean_box(self, len_x, len_y, direction, mid_point):
        """Clean a len_x * len_y box rotated by `direction`, centred on mid_point."""
        # Start at zero-coords
        coords = [(-len_x/2, -len_y/2), (len_x/2, -len_y/2),
                  (len_x/2, len_y/2), (-len_x/2, len_y/2)]
        # Rotate
        coords = rotate_polygon(coords, direction)
        # Move
        coords = transpose_polygon(coords, mid_point)
        self.clean_polygon(coords)

    def clean_polygon(self, corners):
        """Paint `corners` clean and incrementally update the pixel counters."""
        bbox = polygon_bbox(corners)
        orig_clean, orig_dirty = self.count_clean_dirty(*bbox)
        pygame.draw.polygon(self.state, self.CLEAN_COLOR, corners)
        new_clean, new_dirty = self.count_clean_dirty(*bbox)
        # only pixels inside the bbox changed, so a delta update is enough
        self.clean_count += (new_clean - orig_clean)
        self.dirty_count += (new_dirty - orig_dirty)

    def is_coliding(self, loc, size):
        """True when a circle at `loc` with radius `size` touches a wall or obstacle."""
        for p in [self.polygon] + self.obstacles:
            if is_circle_coliding_with_poligon(p, loc, size):
                return True
        return False

    def count_clean_dirty(self, start_x, start_y, end_x, end_y):
        """Count (clean, dirty) pixels in the given box, clamped to the surface."""
        clean_count = 0
        dirty_count = 0
        # grow the box by one pixel on each side and clamp to the surface
        start_x = int(max(start_x - 1, 0))
        max_x = self.state.get_clip().width
        delta_x = int(min(end_x + 1, max_x)) - start_x
        start_y = int(max(start_y - 1, 0))
        max_y = self.state.get_clip().height
        delta_y = int(min(end_y + 1, max_y)) - start_y
        if delta_x <= 0 or delta_y <= 0:
            return (0, 0)
        rect = pygame.Rect(start_x, start_y, delta_x, delta_y)
        sub_surf = self.state.subsurface(rect)
        ar = pygame.PixelArray(sub_surf)
        # hoist the loop-invariant color lookups out of the pixel loop
        dirty_value = self.state.map_rgb(self.DIRTY_COLOR)
        clean_value = self.state.map_rgb(self.CLEAN_COLOR)
        for x in range(delta_x):
            for y in range(delta_y):
                if ar[x, y] == dirty_value:
                    dirty_count += 1
                elif ar[x, y] == clean_value:
                    clean_count += 1
        # drop the PixelArray to release its lock on the surface
        del ar, sub_surf
        return (clean_count, dirty_count)

    def is_good_start_point(self, loc, size):
        """True when `loc` is on live floor and not colliding with anything."""
        ar = pygame.PixelArray(self.state)
        if ar[loc[0], loc[1]] == self.state.map_rgb(self.DEAD_ZONE_COLOR):
            return False
        if self.is_coliding(loc, size):
            return False
        return True
/arena_view.py
import time
import pygame
import math
from helper_functions import *


class ScreenView(object):
    """Pygame view: draws the room surface, the roomba disc/arrow and its trace."""
    WHITE = (255, 255, 255)
    BLACK = (0, 0, 0)
    BLUE = (0, 0, 255)
    GREEN = (0, 255, 0)
    RED = (255, 0, 0)
    # arrow outline in robot-radius units, pointing along the heading
    ARROW_RELATIVE_COORDS = ((0, 0.8), (0.4, 0.5), (0.2, 0.5), (0.2, -0.6),
                             (-0.2, -0.6), (-0.2, 0.5), (-0.4, 0.5), (0, 0.8))

    def __init__(self, roomba_size, screen_size):
        """
        roomba_size -- robot radius in pixels
        screen_size -- (width, height) of the display window
        """
        self.screen = pygame.display.set_mode(screen_size)
        self.roomba_size = roomba_size
        # pre-scale the arrow outline to the robot radius once
        self.arrow_scaled_coords = tuple((tuple((y * roomba_size for y in x))
                                          for x in self.ARROW_RELATIVE_COORDS))

    def clear_screen(self, room_surface):
        """Flip the previous frame to the display, then start a fresh one."""
        pygame.display.flip()
        self.screen.fill(self.WHITE)
        self.screen.blit(room_surface, (0, 0))

    def draw_roomba(self, mid_point, direction, trace):
        """Draw the robot disc, its heading arrow and the path travelled so far."""
        pygame.draw.circle(self.screen, self.RED, mid_point, self.roomba_size)
        rotated_arrow = tuple(rotate(coords, direction)
                              for coords in self.arrow_scaled_coords)
        transposed_arrow = tuple((tuple((y1 + y2 for (y1, y2) in zip(x, mid_point)))
                                  for x in rotated_arrow))
        pygame.draw.polygon(self.screen, self.BLACK, transposed_arrow)
        pygame.draw.aalines(self.screen, self.RED, False, trace)


def testView():
    """Standalone smoke test: spin the roomba arrow until the window closes."""
    pygame.init()
    clock = pygame.time.Clock()
    # BUGFIX: the original called ScreenView(50), view.clear_screen() and
    # view.draw_roomba(loc, dir) with too few arguments and always crashed.
    view = ScreenView(50, (640, 480))
    background = pygame.Surface((640, 480))
    background.fill(ScreenView.WHITE)
    done = False
    for i in range(0, 360 * 10):
        clock.tick(30)
        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                done = True
        if done:
            break
        # a two-point degenerate trace keeps aalines() happy
        view.draw_roomba((100, 100), i * math.pi / 180., [(100, 100), (100, 100)])
        view.clear_screen(background)


if __name__ == "__main__":
    testView()
/cleaning_robot_model.py
import math
from helper_functions import *


class CleaningRobotModel(object):
    """
    Base class for a disc-shaped cleaning robot moving in a RoomModel.

    Subclasses implement step() to drive the robot; this class provides
    movement, collision checks, turning and the cleaning bookkeeping.
    """
    # angular resolution used when sweeping the cleaning head during a turn
    TURN_STEP_FOR_DRAWING = math.pi / 18.

    def __init__(self, location, size, cleaning_head_size, direction, speed, room):
        """
        location           -- (x, y) start position
        size               -- robot radius
        cleaning_head_size -- head width relative to the robot size
        direction          -- heading in radians
        speed              -- distance covered per move()
        room               -- the RoomModel the robot lives in
        """
        self.loc = location
        self.direction = direction
        self.speed = speed
        self.size = size
        self.room = room
        self.cleaning_head_size = cleaning_head_size
        self.trace = [location]

    def calc_move_next_loc(self):
        """Return the location one step ahead along the current heading."""
        x, y = self.loc
        step_x = -self.speed * math.sin(self.direction)
        step_y = self.speed * math.cos(self.direction)
        return (x + step_x, y + step_y)

    def check_move(self):
        """True when moving one step forward would collide."""
        new_loc = self.calc_move_next_loc()
        return self.room.is_coliding(new_loc, self.size)

    def move(self):
        """Advance one step, cleaning the swept box; return True on collision."""
        new_loc = self.calc_move_next_loc()
        # Assumes speed is slow enough to prevent quantum tunneling of the roomba...
        if not self.room.is_coliding(new_loc, self.size):
            mid_point = [(x + y) / 2. for x, y in zip(new_loc, self.loc)]
            self.room.clean_box(self.size * 1.9, self.speed, self.direction, mid_point)
            self.loc = new_loc
            self.trace.append(new_loc)
            return False
        return True

    def clean_step(self, initial_step, step_size):
        """Clean the two thin triangles swept by the head while turning by step_size."""
        delta_x = self.size * self.cleaning_head_size / 2.
        cleaned_triangle_1 = [(0, 0), (delta_x, 0), rotate((delta_x, 0), step_size)]
        cleaned_triangle_2 = [(0, 0), (-delta_x, 0), rotate((-delta_x, 0), step_size)]
        cleaned_triangle_1 = rotate_polygon(cleaned_triangle_1, self.direction + initial_step)
        cleaned_triangle_2 = rotate_polygon(cleaned_triangle_2, self.direction + initial_step)
        cleaned_triangle_1 = transpose_polygon(cleaned_triangle_1, self.loc)
        cleaned_triangle_2 = transpose_polygon(cleaned_triangle_2, self.loc)
        self.room.clean_polygon(cleaned_triangle_1)
        self.room.clean_polygon(cleaned_triangle_2)

    def turn(self, relative_direction):
        """Turn in place by relative_direction radians, sweeping the head as we go."""
        step = 1
        if relative_direction < 0:
            step = -1
        target_step = abs(int(relative_direction / self.TURN_STEP_FOR_DRAWING))
        for turn_step in range(0, target_step + 1):
            self.clean_step(step * turn_step * self.TURN_STEP_FOR_DRAWING,
                            step * self.TURN_STEP_FOR_DRAWING)
        # sweep whatever remains beyond a whole number of drawing steps
        self.clean_step(step * target_step * self.TURN_STEP_FOR_DRAWING,
                        relative_direction - step * target_step * self.TURN_STEP_FOR_DRAWING)
        self.direction += relative_direction

    def step(self):
        # NotImplementedError is the idiomatic marker for an abstract method
        # (the original raised a bare Exception("Pure virtual function called"));
        # it is still an Exception subclass, so existing handlers keep working.
        raise NotImplementedError("step() must be implemented by subclasses")

    def get_draw_info(self):
        """Return (integer location, direction, trace) for the view layer."""
        return ([int(x) for x in self.loc], self.direction, self.trace)
/controller.py
import matplotlib.pyplot

from simulator import run_simulation
from helper_functions import *

# Candidate room outlines (vertex lists); only the last is active.
#ROOM_POLYGON = [(0,0),(640,0),(640,480),(0,480)]
#ROOM_POLYGON = [(0,0),(640,0),(640,480),(320,480),(320,240),(0,240)]
ROOM_POLYGON = [(0, 0), (640, 0), (640, 480), (320, 480), (250, 240), (0, 240)]

SMALL_SQUARE = [(0, 0), (10, 0), (10, 10), (0, 10)]
# four small square obstacles, laid out like table legs
OBSTECLES = [
    transpose_polygon(SMALL_SQUARE, (200, 45)),
    transpose_polygon(SMALL_SQUARE, (270, 45)),
    transpose_polygon(SMALL_SQUARE, (200, 125)),
    transpose_polygon(SMALL_SQUARE, (270, 125)),
]

ROOMBA_SIZE = 20
MIN_COVERAGE_TO_EXIT = 0.988
MAX_NO_GAIN_STEPS = 3000


def main():
    """Run one visualized simulation and plot the coverage-over-time curve."""
    robot_params = {"ROBOT_SIZE": ROOMBA_SIZE, "HEAD_SIZE": 1.9, "SPEED": 3}
    room_params = {"ROOM_POLYGON": ROOM_POLYGON, "OBSTECLES": OBSTECLES}
    stop_conditions = {"MIN_COVERAGE_TO_EXIT": MIN_COVERAGE_TO_EXIT,
                       "MAX_NO_GAIN_STEPS": MAX_NO_GAIN_STEPS,
                       "MAX_TIME": 9000}
    stats = run_simulation(robot_params, room_params, stop_conditions,
                           visual_feedback=True)
    matplotlib.pyplot.plot(stats)
    matplotlib.pyplot.show()


if __name__ == "__main__":
    main()
/helper_functions.py
import math


class Point(object):
    """Minimal 2D point supporting the vector ops needed for collision tests."""

    def __init__(self, coords):
        self.x = coords[0]
        self.y = coords[1]

    def delta(self, other):
        """Return self - other as a new Point."""
        return Point([self.x - other.x, self.y - other.y])

    def dot(self, other):
        """Return the dot product of self and other."""
        return self.x * other.x + self.y * other.y


def rotate(coords, direction):
    """Rotate (x, y) by `direction` radians around the origin (CCW).

    from https://www.siggraph.org/education/materials/HyperGraph/modeling/mod_tran/2drota.htm
    """
    x, y = coords
    cos_d = math.cos(direction)
    sin_d = math.sin(direction)
    return (x * cos_d - y * sin_d, y * cos_d + x * sin_d)


def line_circle_intersect(line_details, circle_details):
    """Return True when the segment [E, L] intersects the circle (C, r).

    Based upon http://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm
    """
    E = line_details[0]
    L = line_details[1]
    C = circle_details[0]
    r = circle_details[1]
    d = L.delta(E)
    f = E.delta(C)
    a = d.dot(d)
    # EDGE CASE FIX: a zero-length segment made the original divide by zero;
    # treat the single point as intersecting only when it lies in the circle.
    if a == 0:
        return f.dot(f) <= r * r
    b = 2 * f.dot(d)
    c = f.dot(f) - r * r
    discriminant = b * b - 4 * a * c
    if discriminant < 0:
        return False
    discriminant = math.sqrt(discriminant)
    t1 = (-b - discriminant) / (2 * a)
    t2 = (-b + discriminant) / (2 * a)
    t1_good = t1 >= 0 and t1 <= 1
    t2_good = t2 >= 0 and t2 <= 1
    return t1_good or t2_good


def rotate_polygon(poly, direction):
    """Rotate every vertex of poly by `direction` radians about the origin."""
    return [rotate(p, direction) for p in poly]


def transpose_polygon(poly, delta_coords):
    """Translate every vertex of poly by delta_coords."""
    return [[x + y for x, y in zip(p, delta_coords)] for p in poly]


def polygon_bbox(poly):
    """Return the axis-aligned bounding box [min_x, min_y, max_x, max_y]."""
    return [min(x[0] for x in poly), min(x[1] for x in poly),
            max(x[0] for x in poly), max(x[1] for x in poly)]


def is_circle_coliding_with_poligon(polygon, center, radius):
    """True when the circle intersects any edge of the (closed) polygon."""
    for line in zip(polygon, polygon[1:] + [polygon[0]]):
        if line_circle_intersect([Point(line[0]), Point(line[1])],
                                 [Point(center), radius]):
            return True
    return False
/roomba_model.py
import math
import random
from cleaning_robot_model import CleaningRobotModel
from helper_functions import *


class RoombaModel(CleaningRobotModel):
    """
    Roomba-style cleaning strategy: an initial spiral, then alternating
    wall-follow and random-direction modes.
    """
    MODE_TIME_LIMIT = [500, 2000]  # steps allowed in [wall-follow, random] mode
    TURN_SIZE_ON_WALL_FOLLOW = math.pi / 180.
    MAX_TURN_STEPS = 360
    SPIRAL_ANGLE_INIT = math.pi / 18.
    SPIRAL_ANGLE_RATIO = 0.995

    def __init__(self, *args, **kwargs):
        super(RoombaModel, self).__init__(*args, **kwargs)
        self.in_random_direction_mode = False
        self.looking_for_wall = False
        self.spiral_mode = True
        self.spiral_angle = self.SPIRAL_ANGLE_INIT
        self.time_in_mode = 0
        # NOTE(review): these kwargs only reach us if the base-class __init__
        # tolerates the extra keywords -- confirm against CleaningRobotModel.
        if "MODE_TIME_LIMIT" in kwargs:
            self.MODE_TIME_LIMIT = kwargs["MODE_TIME_LIMIT"]
        if "TURN_SIZE_ON_WALL_FOLLOW" in kwargs:
            self.TURN_SIZE_ON_WALL_FOLLOW = kwargs["TURN_SIZE_ON_WALL_FOLLOW"]
        # BUGFIX: the division yields a float, but the value is used with
        # range() in left_hand_tracking(); round to the nearest whole step.
        self.MAX_TURN_STEPS = int(round((2 * math.pi) / self.TURN_SIZE_ON_WALL_FOLLOW))

    def left_hand_tracking(self):
        """Rotate left until a wall is ahead; flag wall-search if none found."""
        found_wall = False
        for i in range(self.MAX_TURN_STEPS):
            self.turn(-self.TURN_SIZE_ON_WALL_FOLLOW)
            if self.check_move():
                found_wall = True
                break
        if not found_wall:
            self.looking_for_wall = True
        self.turn(self.TURN_SIZE_ON_WALL_FOLLOW)

    def spiral_step(self):
        """Turn by the current spiral angle, then decay it to widen the spiral."""
        self.turn(self.spiral_angle)
        self.spiral_angle = self.spiral_angle * self.SPIRAL_ANGLE_RATIO

    def step(self):
        """Advance one simulation step according to the current mode."""
        if not self.in_random_direction_mode and not self.looking_for_wall:
            self.left_hand_tracking()
        if self.spiral_mode:
            self.spiral_step()
        collided = self.move()
        self.time_in_mode += 1
        if collided:
            self.looking_for_wall = False
            self.spiral_mode = False
            if self.in_random_direction_mode:
                # bounce off in a fresh random direction
                self.turn(random.randint(0, 360) * math.pi / 180.)
            else:
                # rotate until the path ahead is clear again
                while self.check_move():
                    self.turn(self.TURN_SIZE_ON_WALL_FOLLOW)
        # alternate wall-follow / random modes once the time budget is spent
        # (bool mode flag doubles as the MODE_TIME_LIMIT index)
        if not self.spiral_mode and self.time_in_mode > self.MODE_TIME_LIMIT[self.in_random_direction_mode]:
            self.in_random_direction_mode = not self.in_random_direction_mode
            self.time_in_mode = 0
            # works as-is on both python 2 and 3
            print("Switched to mode %s" % self.in_random_direction_mode)
/simulator.py
import time
import pygame
import math
import random
import itertools

import arena_model
import arena_view
import roomba_model


def run_simulation(robot_params={}, room_params={}, stop_conditions={},
                   visual_feedback=True, draw_final_result=True):
    """
    Run one roomba cleaning simulation and return per-step coverage stats.

    robot_params    -- ROBOT_SIZE, HEAD_SIZE, SPEED, optional INITIAL_POS
    room_params     -- ROOM_POLYGON, OBSTECLES
    stop_conditions -- optional MIN_COVERAGE_TO_EXIT, MAX_NO_GAIN_STEPS, MAX_TIME
    """
    stats = []
    room_polygon = room_params["ROOM_POLYGON"]
    obstecles = room_params["OBSTECLES"]
    max_x = max(x[0] for x in room_polygon)
    max_y = max(x[1] for x in room_polygon)
    robot_size = robot_params["ROBOT_SIZE"]

    if visual_feedback:
        view = arena_view.ScreenView(robot_size, [max_x, max_y])
    room_model = arena_model.RoomModel(room_polygon, obstecles)

    # start pose: explicit INITIAL_POS, otherwise a random collision-free one
    if "INITIAL_POS" in robot_params:
        start_x, start_y, direction = robot_params["INITIAL_POS"]
    else:
        start_x, start_y = random.randint(0, max_x), random.randint(0, max_y)
        while not room_model.is_good_start_point((start_x, start_y), robot_size):
            start_x, start_y = random.randint(0, max_x), random.randint(0, max_y)
        direction = random.randint(0, 360) * math.pi / 180.

    roomba = roomba_model.RoombaModel((start_x, start_y), robot_size,
                                      robot_params["HEAD_SIZE"], direction,
                                      robot_params["SPEED"], room_model)

    done = False
    last_coverage = 0
    steps_with_no_improvement = 0
    min_coverage = None
    if "MIN_COVERAGE_TO_EXIT" in stop_conditions:
        min_coverage = stop_conditions["MIN_COVERAGE_TO_EXIT"]
    max_no_gain_steps = 0
    if "MAX_NO_GAIN_STEPS" in stop_conditions:
        max_no_gain_steps = stop_conditions["MAX_NO_GAIN_STEPS"]
    max_time = None
    if "MAX_TIME" in stop_conditions:
        max_time = stop_conditions["MAX_TIME"]

    for t in itertools.count():
        coverage = float(room_model.clean_count) / (room_model.clean_count +
                                                    room_model.dirty_count)
        stats.append(coverage)
        # stop once coverage stalls while already above the requested minimum
        if coverage == last_coverage and min_coverage is not None and coverage > min_coverage:
            steps_with_no_improvement += 1
            if steps_with_no_improvement > max_no_gain_steps:
                done = True
        last_coverage = coverage
        if max_time is not None and t > max_time:
            done = True
        if visual_feedback:
            view.clear_screen(room_model.state)
        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                done = True
        if done:
            break
        roomba.step()
        if visual_feedback:
            view.draw_roomba(*roomba.get_draw_info())

    # headless run: optionally render one final frame showing the result
    if not visual_feedback and draw_final_result:
        view = arena_view.ScreenView(robot_size, [max_x, max_y])
        view.clear_screen(room_model.state)
        view.draw_roomba(*roomba.get_draw_info())
        view.clear_screen(room_model.state)
    return stats
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
gheinrich/DIGITS
refs/heads/master
{"/digits/model/images/classification/forms.py": ["/digits/model/images/forms.py"], "/digits/model/images/generic/forms.py": ["/digits/model/images/forms.py"]}
└── └── digits ├── config │ └── __init__.py ├── dataset │ └── tasks │ └── __init__.py ├── model │ ├── images │ │ ├── classification │ │ │ └── forms.py │ │ ├── forms.py │ │ └── generic │ │ └── forms.py │ └── tasks │ ├── __init__.py │ └── test_caffe_train.py └── pretrained_model ├── __init__.py └── tasks └── __init__.py
/digits/config/__init__.py
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import

# Create this object before importing the following imports, since they edit the list
option_list = {}

from . import caffe
from . import gpu_list
from . import jobs_dir
from . import log_file
from . import torch
from . import server_name
from . import store_option


def config_value(option):
    """
    Return the current configuration value for the given option
    """
    return option_list[option]
/digits/dataset/tasks/__init__.py
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from .analyze_db import AnalyzeDbTask from .create_db import CreateDbTask from .create_generic_db import CreateGenericDbTask from .parse_folder import ParseFolderTask
/digits/model/images/classification/forms.py
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import

import wtforms
from wtforms import validators

from ..forms import ImageModelForm


class ImageClassificationModelForm(ImageModelForm):
    """
    Defines the form used to create a new ImageClassificationModelJob
    """
    # all fields are inherited from ImageModelForm
    pass
/digits/model/images/forms.py
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import

import wtforms
from wtforms import validators

from ..forms import ModelForm
from digits import utils


class ImageModelForm(ModelForm):
    """
    Defines the form used to create a new ImageModelJob
    """

    # preprocessing options
    crop_size = utils.forms.IntegerField(
        'Crop Size',
        validators=[
            validators.NumberRange(min=1),
            validators.Optional(),
        ],
        tooltip="If specified, during training a random square crop will be taken from the input image before using as input for the network.",
    )

    use_mean = utils.forms.SelectField(
        'Subtract Mean',
        choices=[
            ('none', 'None'),
            ('image', 'Image'),
            ('pixel', 'Pixel'),
        ],
        default='image',
        tooltip="Subtract the mean file or mean pixel for this dataset from each image.",
    )

    # data-augmentation options
    aug_flip = utils.forms.SelectField(
        'Flipping',
        choices=[
            ('none', 'None'),
            ('fliplr', 'Horizontal'),
            ('flipud', 'Vertical'),
            ('fliplrud', 'Horizontal and/or Vertical'),
        ],
        default='none',
        tooltip="Randomly flips each image during batch preprocessing.",
    )

    aug_quad_rot = utils.forms.SelectField(
        'Quadrilateral Rotation',
        choices=[
            ('none', 'None'),
            ('rot90', '0, 90 or 270 degrees'),
            ('rot180', '0 or 180 degrees'),
            ('rotall', '0, 90, 180 or 270 degrees.'),
        ],
        default='none',
        tooltip="Randomly rotates (90 degree steps) each image during batch preprocessing.",
    )

    aug_rot = utils.forms.IntegerField(
        'Rotation (+- deg)',
        default=0,
        validators=[
            validators.NumberRange(min=0, max=180),
        ],
        tooltip="The uniform-random rotation angle that will be performed during batch preprocessing.",
    )

    aug_scale = utils.forms.FloatField(
        'Rescale (stddev)',
        default=0,
        validators=[
            validators.NumberRange(min=0, max=1),
        ],
        tooltip="Retaining image size, the image is rescaled with a +-stddev of this parameter. Suggested value is 0.07.",
    )

    aug_noise = utils.forms.FloatField(
        'Noise (stddev)',
        default=0,
        validators=[
            validators.NumberRange(min=0, max=1),
        ],
        tooltip="Adds AWGN (Additive White Gaussian Noise) during batch preprocessing, assuming [0 1] pixel-value range. Suggested value is 0.03.",
    )

    aug_hsv_use = utils.forms.BooleanField(
        'HSV Shifting',
        default=False,
        tooltip="Augmentation by normal-distributed random shifts in HSV color space, assuming [0 1] pixel-value range.",
        validators=[
        ],
    )

    aug_hsv_h = utils.forms.FloatField(
        'Hue',
        default=0.02,
        validators=[
            validators.NumberRange(min=0, max=0.5),
        ],
        tooltip="Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range.",
    )

    aug_hsv_s = utils.forms.FloatField(
        'Saturation',
        default=0.04,
        validators=[
            validators.NumberRange(min=0, max=0.5),
        ],
        tooltip="Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range.",
    )

    aug_hsv_v = utils.forms.FloatField(
        'Value',
        default=0.06,
        validators=[
            validators.NumberRange(min=0, max=0.5),
        ],
        tooltip="Standard deviation of a shift that will be performed during preprocessing, assuming [0 1] pixel-value range.",
    )
/digits/model/images/generic/forms.py
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import import wtforms from wtforms import validators from ..forms import ImageModelForm class GenericImageModelForm(ImageModelForm): """ Defines the form used to create a new GenericImageModelJob """ pass
/digits/model/tasks/__init__.py
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from .caffe_train import CaffeTrainTask from .torch_train import TorchTrainTask from .train import TrainTask
/digits/model/tasks/test_caffe_train.py
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from . import caffe_train from digits import test_utils def test_caffe_imports(): test_utils.skipIfNotFramework('caffe') import numpy import google.protobuf
/digits/pretrained_model/__init__.py
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from .job import PretrainedModelJob
/digits/pretrained_model/tasks/__init__.py
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from .upload_pretrained import UploadPretrainedModelTask from .caffe_upload import CaffeUploadTask from .torch_upload import TorchUploadTask
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
welloderx/wechat-2021-BigDataChallenge
refs/heads/master
{"/src/deepctr_ext/utils.py": ["/src/deepctr_ext/feat.py", "/src/deepctr_ext/layers.py"], "/src/main.py": ["/src/core/entrypoint.py"], "/src/core/entrypoint.py": ["/src/core/tasks/lgb.py"]}
└── └── src ├── core │ ├── entrypoint.py │ └── tasks │ └── lgb.py ├── deepctr_ext │ ├── feat.py │ ├── layers.py │ └── utils.py └── main.py
/src/core/entrypoint.py
from core.tasks.deepfm import DeepFM_Manager
from core.tasks.lgb import LightGBM_Manager


class EntryPoint(object):
    """Launches the manager class matching the configured task name."""

    def __init__(self, cfg):
        # Merged run configuration; cfg.task selects which manager runs.
        self.cfg = cfg

    def start(self):
        """Build the manager for cfg.task and run it.

        Raises:
            ValueError: if cfg.task names no registered task.
        """
        task_name = self.cfg.task
        if task_name == 'DeepFM':
            manager = DeepFM_Manager(self.cfg)
        elif task_name == 'LightGBM':
            manager = LightGBM_Manager(self.cfg)
        else:
            raise ValueError("unknown task name")
        manager.start()
/src/core/tasks/lgb.py
""" LightGBM """ import lightgbm as lgb import pandas from utils import DecoratorTimer class LightGBM_Manager(object): model_name = 'LightGBM' def __init__(self, cfg): self.cfg = cfg self.yml_cfg = self.cfg.yml_cfg self.model_cfg = self.yml_cfg[self.model_name] assert self.cfg.dataset_name == 'wechat1' @DecoratorTimer() def handle_dataset(self): # config data_folder_path = self.cfg.data_folder_path # columns common_columns = ['userid', 'feedid'] pred_columns = ['read_comment', 'like', 'click_avatar', 'forward'] action_columns = ['play', 'stay', 'device', 'date_', 'follow', 'favorite', 'comment'] feed_columns = [ 'authorid', 'videoplayseconds', 'description', 'ocr', 'asr', 'description_char', 'ocr_char', 'asr_char', 'bgm_song_id', 'bgm_singer_id', 'manual_keyword_list', 'machine_keyword_list', 'manual_tag_list', 'machine_tag_list', 'feed_embedding' ] # feat types sparse_feat_names = common_columns + \ ['follow', 'favorite', 'comment', 'authorid', 'bgm_song_id', 'bgm_singer_id'] dense_feat_names = ['videoplayseconds', 'play', 'stay'] # handle raw_feed_info = pandas.read_csv(data_folder_path + "/feed_info.csv") raw_user_action = pandas.read_csv(data_folder_path + "/user_action.csv") def start(self): self.handle_dataset()
/src/deepctr_ext/feat.py
from collections import namedtuple


class SparseFeat(namedtuple('SparseFeat',
                            ['name', 'vocabulary_size', 'embedding_dim', 'use_hash', 'dtype', 'embedding_name'])):
    """Single-valued categorical feature column.

    ``embedding_dim`` may be the string ``"auto"``, in which case it is derived
    from the vocabulary size as ``6 * vocabulary_size ** 0.25``.
    """
    __slots__ = ()

    def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype="int32", embedding_name=None):
        # The embedding table defaults to being keyed by the feature's own name,
        # so two features can share one table by passing the same embedding_name.
        if embedding_name is None:
            embedding_name = name
        if embedding_dim == "auto":
            embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
        if use_hash:
            print(
                "Notice! Feature Hashing on the fly currently is not supported in torch version,you can use tensorflow version!")
        return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, dtype,
                                              embedding_name)

    def __hash__(self):
        # Feature columns are deduplicated by name only.
        return self.name.__hash__()


class VarLenSparseFeat(namedtuple('VarLenSparseFeat', ['sparsefeat', 'maxlen', 'combiner', 'length_name'])):
    """Variable-length (multi-valued) categorical feature column.

    Wraps a :class:`SparseFeat` and adds sequence length / pooling metadata.
    ``length_name``, when given, names a companion column holding the actual
    sequence length; otherwise id 0 is treated as padding downstream.
    """
    __slots__ = ()

    def __new__(cls, sparsefeat, maxlen, combiner="mean", length_name=None):
        return super(VarLenSparseFeat, cls).__new__(cls, sparsefeat, maxlen, combiner, length_name)

    @property
    def name(self):
        return self.sparsefeat.name

    @property
    def vocabulary_size(self):
        return self.sparsefeat.vocabulary_size

    @property
    def embedding_dim(self):
        return self.sparsefeat.embedding_dim

    @property
    def dtype(self):
        return self.sparsefeat.dtype

    @property
    def embedding_name(self):
        return self.sparsefeat.embedding_name

    @property
    def group_name(self):
        # BUGFIX: the SparseFeat namedtuple defined above has no 'group_name'
        # field, so delegating unconditionally raised AttributeError.  Fall
        # back to DeepCTR's default feature-group name.
        return getattr(self.sparsefeat, 'group_name', "default_group")

    def __hash__(self):
        return self.name.__hash__()


class DenseFeat(namedtuple('DenseFeat', ['name', 'dimension', 'dtype'])):
    """Continuous (dense) feature column of a fixed dimension."""
    __slots__ = ()

    def __new__(cls, name, dimension=1, dtype="float32"):
        return super(DenseFeat, cls).__new__(cls, name, dimension, dtype)

    def __hash__(self):
        return self.name.__hash__()
/src/deepctr_ext/layers.py
import torch.nn as nn
import torch


class FM(nn.Module):
    """Factorization Machine models pairwise (order-2) feature interactions
    without linear term and bias.
      Input shape
        - 3D tensor with shape: ``(batch_size,field_size,embedding_size)``.
      Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
      References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self):
        super(FM, self).__init__()

    def forward(self, inputs):
        fm_input = inputs
        # FM identity: sum over pairs <v_i, v_j> = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2),
        # finally summed over the embedding dimension.
        square_of_sum = torch.pow(torch.sum(fm_input, dim=1, keepdim=True), 2)
        sum_of_square = torch.sum(fm_input * fm_input, dim=1, keepdim=True)
        cross_term = square_of_sum - sum_of_square
        cross_term = 0.5 * torch.sum(cross_term, dim=2, keepdim=False)
        return cross_term


class Identity(nn.Module):
    """No-op module used as the 'linear' activation."""

    def __init__(self, **kwargs):
        super(Identity, self).__init__()

    def forward(self, X):
        return X


def activation_layer(act_name, hidden_size=None, dice_dim=2):
    """Construct activation layers

    Args:
        act_name: str or nn.Module, name of activation function
        hidden_size: int, used for Dice activation
        dice_dim: int, used for Dice activation
    Return:
        act_layer: activation layer

    Raises:
        NotImplementedError: for unrecognized names or non-module types.
    """
    if isinstance(act_name, str):
        name = act_name.lower()
        if name == 'sigmoid':
            act_layer = nn.Sigmoid()
        elif name == 'linear':
            act_layer = Identity()
        elif name == 'relu':
            act_layer = nn.ReLU(inplace=True)
        elif name == 'prelu':
            act_layer = nn.PReLU()
        else:
            # BUGFIX: unknown names previously fell through and returned None,
            # which only blew up later inside nn.ModuleList / forward.
            raise NotImplementedError
    elif issubclass(act_name, nn.Module):
        act_layer = act_name()
    else:
        raise NotImplementedError
    return act_layer


class DNN(nn.Module):
    """The Multi Layer Percetron

      Input shape
        - nD tensor with shape: ``(batch_size, ..., input_dim)``. The most common situation would be a 2D input with shape ``(batch_size, input_dim)``.
      Output shape
        - nD tensor with shape: ``(batch_size, ..., hidden_size[-1])``. For instance, for a 2D input with shape ``(batch_size, input_dim)``, the output would have shape ``(batch_size, hidden_size[-1])``.
      Arguments
        - **inputs_dim**: input feature dimension.
        - **hidden_units**:list of positive integer, the layer number and units in each layer.
        - **activation**: Activation function to use.
        - **l2_reg**: float between 0 and 1. L2 regularizer strength applied to the kernel weights matrix.
        - **dropout_rate**: float in [0,1). Fraction of the units to dropout.
        - **use_bn**: bool. Whether use BatchNormalization before activation or not.
        - **seed**: A Python integer to use as random seed.
    """

    def __init__(self, inputs_dim, hidden_units, activation='relu', l2_reg=0, dropout_rate=0, use_bn=False,
                 init_std=0.0001, dice_dim=3, seed=1024, device='cpu'):
        super(DNN, self).__init__()
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(dropout_rate)
        self.seed = seed
        self.l2_reg = l2_reg
        self.use_bn = use_bn
        if len(hidden_units) == 0:
            raise ValueError("hidden_units is empty!!")
        # Prepend the input dimension so consecutive pairs define each Linear.
        hidden_units = [inputs_dim] + list(hidden_units)

        self.linears = nn.ModuleList(
            [nn.Linear(hidden_units[i], hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])

        if self.use_bn:
            self.bn = nn.ModuleList(
                [nn.BatchNorm1d(hidden_units[i + 1]) for i in range(len(hidden_units) - 1)])

        self.activation_layers = nn.ModuleList(
            [activation_layer(activation, hidden_units[i + 1], dice_dim) for i in range(len(hidden_units) - 1)])

        # Initialize only the weight matrices; biases keep their default init.
        for name, tensor in self.linears.named_parameters():
            if 'weight' in name:
                nn.init.normal_(tensor, mean=0, std=init_std)

        self.to(device)

    def forward(self, inputs):
        deep_input = inputs
        # Linear -> (BatchNorm) -> activation -> dropout, per layer.
        for i in range(len(self.linears)):
            fc = self.linears[i](deep_input)
            if self.use_bn:
                fc = self.bn[i](fc)
            fc = self.activation_layers[i](fc)
            fc = self.dropout(fc)
            deep_input = fc
        return deep_input


class PredictionLayer(nn.Module):
    """
      Arguments
         - **task**: str, ``"binary"`` for  binary logloss or  ``"regression"`` for regression loss
         - **use_bias**: bool.Whether add bias term or not.
    """

    def __init__(self, task='binary', use_bias=True, **kwargs):
        if task not in ["binary", "multiclass", "regression"]:
            raise ValueError("task must be binary,multiclass or regression")
        super(PredictionLayer, self).__init__()
        self.use_bias = use_bias
        self.task = task
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros((1,)))

    def forward(self, X):
        output = X
        if self.use_bias:
            # BUGFIX: was `output += self.bias`, which mutated the caller's
            # tensor in place; use out-of-place addition instead.
            output = output + self.bias
        if self.task == "binary":
            output = torch.sigmoid(output)
        return output


class SequencePoolingLayer(nn.Module):
    """The SequencePoolingLayer is used to apply pooling operation(sum,mean,max) on variable-length sequence feature/multi-value feature.

      Input shape
        - A list of two  tensor [seq_value,seq_len]
        - seq_value is a 3D tensor with shape: ``(batch_size, T, embedding_size)``
        - seq_len is a 2D tensor with shape : ``(batch_size, 1)``,indicate valid length of each sequence.
      Output shape
        - 3D tensor with shape: ``(batch_size, 1, embedding_size)``.
      Arguments
        - **mode**:str.Pooling operation to be used,can be sum,mean or max.
    """

    def __init__(self, mode='mean', supports_masking=False, device='cpu'):
        super(SequencePoolingLayer, self).__init__()
        if mode not in ['sum', 'mean', 'max']:
            raise ValueError('parameter mode should in [sum, mean, max]')
        self.supports_masking = supports_masking
        self.mode = mode
        self.device = device
        # Epsilon keeps the mean-pool division finite for length-0 sequences.
        self.eps = torch.FloatTensor([1e-8]).to(device)
        self.to(device)

    def _sequence_mask(self, lengths, maxlen=None, dtype=torch.bool):
        # Returns a mask tensor representing the first N positions of each cell.
        if maxlen is None:
            maxlen = lengths.max()
        row_vector = torch.arange(0, maxlen, 1).to(lengths.device)
        matrix = torch.unsqueeze(lengths, dim=-1)
        mask = row_vector < matrix
        # BUGFIX: `mask.type(dtype)` is not in-place and its result was
        # discarded; assign it so the requested dtype is honored.
        mask = mask.type(dtype)
        return mask

    def forward(self, seq_value_len_list):
        if self.supports_masking:
            uiseq_embed_list, mask = seq_value_len_list  # [B, T, E], [B, T]
            mask = mask.float()
            user_behavior_length = torch.sum(mask, dim=-1, keepdim=True)
            mask = mask.unsqueeze(2)
        else:
            uiseq_embed_list, user_behavior_length = seq_value_len_list  # [B, T, E], [B, 1]
            mask = self._sequence_mask(user_behavior_length, maxlen=uiseq_embed_list.shape[1],
                                       dtype=torch.float32)  # [B, 1, maxlen]
            mask = torch.transpose(mask, 1, 2)  # [B, maxlen, 1]

        embedding_size = uiseq_embed_list.shape[-1]
        # Broadcast the mask across the embedding dimension.
        mask = torch.repeat_interleave(mask, embedding_size, dim=2)  # [B, maxlen, E]

        if self.mode == 'max':
            # Push padded positions to -1e9 so max ignores them.
            hist = uiseq_embed_list - (1 - mask) * 1e9
            hist = torch.max(hist, dim=1, keepdim=True)[0]
            return hist

        hist = uiseq_embed_list * mask.float()
        hist = torch.sum(hist, dim=1, keepdim=False)

        if self.mode == 'mean':
            self.eps = self.eps.to(user_behavior_length.device)
            hist = torch.div(hist, user_behavior_length.type(torch.float32) + self.eps)

        hist = torch.unsqueeze(hist, dim=1)
        return hist
/src/deepctr_ext/utils.py
from collections import OrderedDict
from .feat import SparseFeat, DenseFeat, VarLenSparseFeat
import torch.nn as nn
import numpy as np
import torch
from .layers import SequencePoolingLayer


def get_feature_names(feature_columns):
    # Feature names in the same order as the packed input layout below.
    features = build_input_features(feature_columns)
    return list(features.keys())


def build_input_features(feature_columns):
    # Return OrderedDict: {feature_name:(start, start+dimension)}
    # Maps each feature column to its slice in the flat input matrix.
    features = OrderedDict()
    start = 0
    for feat in feature_columns:
        feat_name = feat.name
        if feat_name in features:
            # Duplicate names share the first registered slice.
            continue
        if isinstance(feat, SparseFeat):
            # One integer id column per sparse feature.
            features[feat_name] = (start, start + 1)
            start += 1
        elif isinstance(feat, DenseFeat):
            features[feat_name] = (start, start + feat.dimension)
            start += feat.dimension
        elif isinstance(feat, VarLenSparseFeat):
            # maxlen id columns for the (padded) sequence.
            features[feat_name] = (start, start + feat.maxlen)
            start += feat.maxlen
            if feat.length_name is not None and feat.length_name not in features:
                # Extra scalar column holding the actual sequence length.
                features[feat.length_name] = (start, start + 1)
                start += 1
        else:
            raise TypeError("Invalid feature column type,got", type(feat))
    return features


def create_embedding_matrix(feature_columns, init_std=0.0001, linear=False, sparse=False, device='cpu'):
    # Return nn.ModuleDict: for sparse features, {embedding_name: nn.Embedding}
    # for varlen sparse features, {embedding_name: nn.EmbeddingBag}
    # With linear=True every table gets dimension 1 (used for the linear part).
    sparse_feature_columns = list(
        filter(lambda x: isinstance(x, SparseFeat), feature_columns)) if len(feature_columns) else []

    varlen_sparse_feature_columns = list(
        filter(lambda x: isinstance(x, VarLenSparseFeat), feature_columns)) if len(feature_columns) else []

    embedding_dict = nn.ModuleDict(
        {feat.embedding_name: nn.Embedding(feat.vocabulary_size, feat.embedding_dim if not linear else 1,
                                           sparse=sparse)
         for feat in sparse_feature_columns + varlen_sparse_feature_columns}
    )

    # for feat in varlen_sparse_feature_columns:
    #     embedding_dict[feat.embedding_name] = nn.EmbeddingBag(
    #         feat.dimension, embedding_size, sparse=sparse, mode=feat.combiner)

    for tensor in embedding_dict.values():
        nn.init.normal_(tensor.weight, mean=0, std=init_std)

    return embedding_dict.to(device)


# ----------------------------------
def get_varlen_pooling_list(embedding_dict, features, feature_index, varlen_sparse_feature_columns, device):
    # Pool each variable-length sparse feature's embeddings into one vector.
    varlen_sparse_embedding_list = []
    for feat in varlen_sparse_feature_columns:
        seq_emb = embedding_dict[feat.embedding_name](
            features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long())
        if feat.length_name is None:
            # No explicit length column: id 0 is treated as padding and masked out.
            seq_mask = features[:, feature_index[feat.name][0]:feature_index[feat.name][1]].long() != 0
            emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=True, device=device)(
                [seq_emb, seq_mask])
        else:
            seq_length = features[:, feature_index[feat.length_name][0]:feature_index[feat.length_name][1]].long()
            emb = SequencePoolingLayer(mode=feat.combiner, supports_masking=False, device=device)(
                [seq_emb, seq_length])
        varlen_sparse_embedding_list.append(emb)
    return varlen_sparse_embedding_list


# -------------------------------
def combined_dnn_input(sparse_embedding_list, dense_value_list):
    # Flatten and concatenate sparse embeddings and dense values for the DNN.
    if len(sparse_embedding_list) > 0 and len(dense_value_list) > 0:
        sparse_dnn_input = torch.flatten(
            torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
        dense_dnn_input = torch.flatten(
            torch.cat(dense_value_list, dim=-1), start_dim=1)
        return concat_fun([sparse_dnn_input, dense_dnn_input])
    elif len(sparse_embedding_list) > 0:
        return torch.flatten(torch.cat(sparse_embedding_list, dim=-1), start_dim=1)
    elif len(dense_value_list) > 0:
        return torch.flatten(torch.cat(dense_value_list, dim=-1), start_dim=1)
    else:
        raise NotImplementedError


def concat_fun(inputs, axis=-1):
    # Concatenate, skipping the op entirely for a single tensor.
    if len(inputs) == 1:
        return inputs[0]
    else:
        return torch.cat(inputs, dim=axis)


def slice_arrays(arrays, start=None, stop=None):
    """Slice an array or list of arrays.

    This takes an array-like, or a list of
    array-likes, and outputs:
        - arrays[start:stop] if `arrays` is an array-like
        - [x[start:stop] for x in arrays] if `arrays` is a list

    Can also work on list/array of indices: `slice_arrays(x, indices)`

    Arguments:
        arrays: Single array or list of arrays.
        start: can be an integer index (start index)
            or a list/array of indices
        stop: integer (stop index); should be None if
            `start` was a list.

    Returns:
        A slice of the array(s).

    Raises:
        ValueError: If the value of start is a list and stop is not None.
    """
    if arrays is None:
        return [None]
    if isinstance(arrays, np.ndarray):
        arrays = [arrays]
    if isinstance(start, list) and stop is not None:
        raise ValueError('The stop argument has to be None if the value of start '
                         'is a list.')
    elif isinstance(arrays, list):
        if hasattr(start, '__len__'):
            # hdf5 datasets only support list objects as indices
            if hasattr(start, 'shape'):
                start = start.tolist()
            # Fancy indexing: pick the rows listed in `start` from each array.
            return [None if x is None else x[start] for x in arrays]
        else:
            if len(arrays) == 1:
                return arrays[0][start:stop]
            return [None if x is None else x[start:stop] for x in arrays]
    else:
        if hasattr(start, '__len__'):
            if hasattr(start, 'shape'):
                start = start.tolist()
            return arrays[start]
        elif hasattr(start, '__getitem__'):
            return arrays[start:stop]
        else:
            return [None]
/src/main.py
from utils import UnionConfig, LoggerUtil, DecoratorTimer, PathUtil, add_argument_from_dict_format
from conf import settings
from core.entrypoint import EntryPoint
import os
import argparse
import logging
import shutil
import traceback
import copy
import sys

# Task names selectable via --task; must match EntryPoint's dispatch.
registered_task_list = ['DeepFM', 'LightGBM']


def get_config_object_and_parse_args():
    """Build the merged run configuration.

    Layering: settings.py defaults, then --dataset_name/--task CLI flags,
    then the dataset's YAML file, then extra CLI overrides for the selected
    model's YAML section.
    """
    # first time resolve sys.argv
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, default='wechat1', help='dataset name')
    parser.add_argument('--task', type=str, default='LightGBM', choices=registered_task_list,
                        help='task_name: {}'.format(registered_task_list))
    args, unknown_args = parser.parse_known_args()

    config = UnionConfig.from_py_module(settings)  # get config from settings.py
    config.merge_asdict(args.__dict__)  # merge config from argparse
    yml_cfg = UnionConfig.from_yml_file(
        config.CONFIG_FOLDER_PATH + "/datasets/{}.yml".format(args.dataset_name)
    )  # get config from {dataset_name}.yml

    # filter irrelevant config: drop YAML sections of the tasks we are not running
    tasks = copy.copy(registered_task_list)
    tasks.remove(config.task)
    [yml_cfg.__delitem__(task) for task in tasks if task in yml_cfg.keys()]
    config.yml_cfg = yml_cfg

    # second time resolve sys.argv: expose the selected model's YAML keys as
    # CLI flags so individual hyperparameters can be overridden per run.
    model_cfg = yml_cfg[config.task]
    parser2 = add_argument_from_dict_format(model_cfg, filter_keys=list(args.__dict__.keys()))
    args2 = parser2.parse_args(unknown_args)
    for key in model_cfg.keys():
        if key in args2.__dict__:
            model_cfg[key] = args2.__dict__[key]
    return config


def init_all(cfg: UnionConfig):
    """Resolve run folders, create them, and attach a logger to cfg."""
    cfg.data_folder_path = cfg.DATA_FOLDER_PATH + "/{}".format(cfg.dataset_name)
    cfg.TMPOUT_FOLDER_PATH += "/{}".format(cfg.dataset_name)
    cfg.OUTPUT_FOLDER_PATH += "/{}".format(cfg.dataset_name)
    cfg.TMPOUT_FOLDER_PATH = os.path.realpath(cfg.TMPOUT_FOLDER_PATH)
    cfg.OUTPUT_FOLDER_PATH = os.path.realpath(cfg.OUTPUT_FOLDER_PATH)
    PathUtil.check_path_exist(cfg.data_folder_path)
    if cfg.task in registered_task_list:
        # Per-run scratch folder (keyed by run ID) and final output folder.
        cfg.tmpout_folder_path = cfg.TMPOUT_FOLDER_PATH + "/{}/{}".format(cfg.task, cfg.ID)
        cfg.output_folder_path = cfg.OUTPUT_FOLDER_PATH + "/{}".format(cfg.task)
        PathUtil.auto_create_folder_path(
            cfg.tmpout_folder_path, cfg.output_folder_path
        )
    else:
        raise ValueError("unknown task name")
    log_filepath = cfg.tmpout_folder_path + "/{ID}.log".format(ID=cfg.ID)
    cfg.logger = LoggerUtil(logfile=log_filepath, disableFile=False).get_logger()
    # Give the timing decorator the run logger so timings land in the log file.
    DecoratorTimer.logger = cfg.logger


def main(config):
    """Log the run header, persist the config, and launch the selected task."""
    config.logger.info("====" * 15)
    config.logger.info("[ID]: " + config.ID)
    config.logger.info("[DATASET]: " + config.dataset_name)
    config.logger.info("[TASK]: " + config.task)
    config.logger.info("[ARGV]: {}".format(sys.argv))
    config.logger.info("[ALL_CFG]: \n" + config.dump_fmt())
    config.dump_file(config.tmpout_folder_path + "/" + "config.json")
    config.logger.info("====" * 15)
    entrypoint = EntryPoint(config)
    entrypoint.start()
    config.logger.info("Task Completed!")


if __name__ == '__main__':
    config = get_config_object_and_parse_args()
    init_all(config)  # init config
    try:
        main(config)
        logging.shutdown()
        # Only a successful run is promoted from the scratch area to output.
        shutil.move(config.tmpout_folder_path, config.output_folder_path)
    except Exception as e:
        config.logger.error(traceback.format_exc())
        raise e
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
JiTao3/hierarchical_attention
refs/heads/master
{"/model/decoder.py": ["/util/prase_tree2node_leaf.py", "/model/encoder.py", "/util/plan_to_tree.py"], "/model/encoder.py": ["/util/prase_tree2node_leaf.py", "/util/plan_to_tree.py", "/util/dataset.py"], "/util/dataset.py": ["/util/prase_tree2node_leaf.py", "/util/plan_to_tree.py"], "/train.py": ["/model/encoder.py", "/util/dataset.py"], "/util/prase_tree2node_leaf.py": ["/util/plan_to_tree.py"], "/util/result.py": ["/util/qerror.py"]}
└── ├── model │ ├── decoder.py │ └── encoder.py ├── train.py └── util ├── __init__.py ├── dataset.py ├── plan_to_tree.py ├── prase_tree2node_leaf.py ├── qerror.py └── result.py
/model/decoder.py
from torch.autograd import Variable
import time
import copy
import math
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import os
import sys

sys.path.append(os.path.abspath(os.getcwd()))
print(sys.path)
from util.plan_to_tree import Node, parse_dep_tree_text
from util.prase_tree2node_leaf import (
    treeInterpolation,
    hierarchical_embeddings,
    upward_ca,
    tree2NodeLeafmat,
)
from model.encoder import attention, WeightedAggregation, LayerNorm, Reshape, clones


class DecoderLinear(nn.Module):
    """Projects encoder memory to value/key space and the target to query space."""

    def __init__(self, d_feature, d_model):
        super(DecoderLinear, self).__init__()
        self.query_linear = nn.Linear(d_model, d_feature)
        self.key_linear = nn.Linear(d_model, d_feature)
        # BUGFIX: attribute was misspelled `vlaue_linear`, so forward()'s
        # `self.value_linear` raised AttributeError on first use.
        self.value_linear = nn.Linear(d_model, d_feature)

    def forward(self, x, target):
        value = self.value_linear(x)
        key = self.key_linear(x)
        query = self.query_linear(target)
        return value, key, query


class DecoderAttentionScaledDot(nn.Module):
    """Scaled-dot attention scores of the target query vs. node and leaf keys."""

    def __init__(self, d_feature, d_model, dropout=0.1):
        super(DecoderAttentionScaledDot, self).__init__()
        # self.decoderLiner = DecoderLinear(d_feature, d_model)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, q_target, node_k, leaf_k, mask=None):
        Aqn = attention(query=q_target, key=node_k, mask=mask, dropout=self.dropout)
        Aql = attention(query=q_target, key=leaf_k, mask=mask, dropout=self.dropout)
        return Aqn, Aql


class DecoderAttention(nn.Module):
    """Attends the decoder target over the encoder's node and leaf representations."""

    def __init__(self, d_feature, d_model):
        super(DecoderAttention, self).__init__()
        self.linear = DecoderLinear(d_feature=d_feature, d_model=d_model)
        self.scaledDot = DecoderAttentionScaledDot(d_feature=d_feature, d_model=d_model)
        self.weightedAgg = WeightedAggregation(d_feature)

    def forward(self, root, node, leaf, target):
        node_v, node_k, node_q = self.linear(node, target)
        leaf_v, leaf_k, leaf_q = self.linear(leaf, target)
        # node_q == leaf_q is target
        Aqn, Aql = self.scaledDot(node_q, node_k, leaf_k)
        # Keep node/leaf ordering consistent with prase_tree2node_leaf so the
        # interpolation below lines up with the attention score matrices.
        interpolation_vec = treeInterpolation(root=root, leaf=leaf_v, node=node_v)
        # (node + 1) * leaf * d
        upward_ca_vec = upward_ca(interpolation_vec)
        # BUGFIX: was `self.weightAgg(leaf, upward_ca_vec)` — the attribute is
        # named `weightedAgg`, and WeightedAggregation.forward (see
        # model/encoder.py) takes (root, leaf, upward_ca_vec).
        node_hat = self.weightedAgg(root, leaf, upward_ca_vec)
        leaf_hat = leaf_v
        # NOTE(review): softmax without an explicit `dim` — presumably the
        # last dimension is intended; confirm before relying on this.
        # BUGFIX: torch.cat takes a *sequence* of tensors; the originals
        # passed the tensors as two positional arguments.
        Attq = F.softmax(
            torch.matmul(
                torch.cat((Aqn, Aql), dim=-1),
                torch.cat((node_hat.double(), leaf_hat), dim=-2),
            )
        )
        return Attq


class DecoderLayer(nn.Module):
    """One decoder layer: tree attention with residual norms + feed-forward."""

    def __init__(self, d_feature, d_model, d_ff):
        super(DecoderLayer, self).__init__()
        self.norm1 = LayerNorm(d_feature)
        self.norm2 = LayerNorm(d_feature)
        self.decoderAttention = DecoderAttention(d_feature, d_model)
        self.feed_forward = nn.Sequential(
            nn.Linear(d_model, d_ff), nn.ReLU(), nn.Linear(d_ff, d_model)
        )

    def forward(self, root, node_x, leaf_x, target):
        # target + norm(attention(target)) residual pattern (WIP per original).
        x = self.decoderAttention(root, node_x, leaf_x, target)
        x = x + self.norm1(x)
        x = self.feed_forward(x)
        x = x + self.norm2(x)
        return x


class Decoder(nn.Module):
    """Stack of N decoder layers applied to a reshaped target."""

    def __init__(self, d_feature, d_model, d_ff, N):
        super(Decoder, self).__init__()
        self.reshape = Reshape(d_feature=d_feature, d_model=d_model)
        # BUGFIX: `clones(DecoderLayer, N)` passed the class object itself;
        # clones() deep-copies an *instantiated* module (see Encoder's usage).
        self.layers = clones(DecoderLayer(d_feature, d_model, d_ff), N)

    def forward(self, root, node_x, leaf_x, target):
        target = self.reshape(target)
        for layer in self.layers:
            target = layer(root, node_x, leaf_x, target)
        return target
/model/encoder.py
import copy
import math
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import os
import sys

sys.path.append(os.path.abspath(os.getcwd()))
# print(sys.path)
from util.plan_to_tree import Node, parse_dep_tree_text
from util.prase_tree2node_leaf import treeInterpolation, upward_ca, tree2NodeLeafmat, weightedAggregationCoeffi
from util.dataset import PlanDataset


def clones(module, N):
    # Deep-copy an instantiated module N times; N <= 0 yields an empty list
    # (so a 1-layer Encoder has no extra stacked layers).
    if N <= 0:
        return []
    else:
        return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


class LayerNorm(nn.Module):
    # Layer normalization over the last dimension with learnable gain (a_2)
    # and bias (b_2).
    def __init__(self, feature, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(feature), requires_grad=True)
        self.b_2 = nn.Parameter(torch.zeros(feature), requires_grad=True)
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.a_2 * (x - mean) / (std + self.eps) + self.b_2


def attention(query, key, mask=None, dropout=None):
    """get score"""
    # Scaled dot-product attention *weights only* — no value multiplication;
    # callers multiply by values themselves.
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return p_attn


class TreeAttentionLinear(nn.Module):
    # Q/K/V projections from feature space to model space.
    # NOTE(review): `vlaue_linear` is misspelled but used consistently within
    # this class, so it works; renaming would be a separate refactor.
    def __init__(self, d_feature, d_model, dropout=0.1):
        super(TreeAttentionLinear, self).__init__()
        self.query_linear = nn.Linear(d_feature, d_model)
        self.key_linear = nn.Linear(d_feature, d_model)
        self.vlaue_linear = nn.Linear(d_feature, d_model)

    def forward(self, x):
        q = self.query_linear(x)
        k = self.key_linear(x)
        v = self.vlaue_linear(x)
        return q, k, v


class TreeAttentionScaledDot(nn.Module):
    def __init__(self, d_feature, dropout=0.1):
        super(TreeAttentionScaledDot, self).__init__()
        # !!! use different dropout ???
        self.dropout = nn.Dropout(p=dropout)
        # self.leafLinear = nn.Linear(d_feature, d_feature)

    def forward(self, node_q, node_k, leaf_q, leaf_k, mask=None):
        # Four score matrices: node->leaf, node->node, leaf->leaf, leaf->node.
        Anl = attention(query=node_q, key=leaf_k, mask=mask, dropout=self.dropout)
        Ann = attention(query=node_q, key=node_k, mask=mask, dropout=self.dropout)
        All = attention(query=leaf_q, key=leaf_k, mask=mask, dropout=self.dropout)
        Aln = attention(query=leaf_q, key=node_k, mask=mask, dropout=self.dropout)
        return Anl, Ann, All, Aln


class WeightedAggregation(nn.Module):
    # Aggregates the per-(node, leaf) upward context vectors into one vector
    # per node, weighted by a learned scoring vector u_s over leaves.
    def __init__(self, d_feature):
        super(WeightedAggregation, self).__init__()
        self.u_s = nn.Parameter(torch.rand(d_feature, requires_grad=True))
        self.register_parameter("U_s", self.u_s)
        self.d_featuer = d_feature

    def forward(self, root, leaf, upward_ca_vec):
        # omega size leaf * d
        omega = torch.matmul(leaf, self.u_s)
        # upward_ca_vec size node * leaf * d
        omega_shape = omega.shape[-1]
        weighted_aggregation_vec = upward_ca_vec * omega.reshape([1, omega_shape, 1])
        # weight_aggregation_vec shape is node*leaf*d -> sum over leaves -> node*d
        weighted_aggregation_vec = torch.sum(weighted_aggregation_vec, dim=1)
        # upward_ca_vec_cp = copy.copy(upward_ca_vec)
        # nozero_div = (np.count_nonzero(upward_ca_vec_cp.detach().numpy(), axis=(1, 2)) + 1e-6) / self.d_featuer
        # no_zero = 1 / nozero_div
        # no_zero = torch.from_numpy(no_zero)
        # weighted_aggregation_vec = weighted_aggregation_vec * torch.unsqueeze(no_zero, 1)
        # Normalization coefficients come from the tree structure itself.
        div = weightedAggregationCoeffi(root=root)
        weighted_aggregation_vec = weighted_aggregation_vec * torch.unsqueeze(div, 1)
        return weighted_aggregation_vec


class TreeAttention(nn.Module):
    # Hierarchical attention between internal nodes and leaves of a plan tree.
    def __init__(self, d_feature, d_model):
        super(TreeAttention, self).__init__()
        self.nodelinear = TreeAttentionLinear(d_feature=d_feature, d_model=d_model)
        self.leaflinear = TreeAttentionLinear(d_feature=d_feature, d_model=d_model)
        self.scaledDot = TreeAttentionScaledDot(d_feature=d_feature)
        self.weightAgg = WeightedAggregation(d_feature=d_feature)

    def forward(self, root: Node, node, leaf):
        node_q, node_k, node_v = self.nodelinear(node)
        leaf_q, leaf_k, leaf_v = self.leaflinear(leaf)
        Anl, Ann, All, Aln = self.scaledDot(node_q, node_k, leaf_q, leaf_k)
        # The node/leaf row order must match prase_tree2node_leaf's
        # plan_tree_leaves_node ordering so scores and values line up.
        interpolation_vec = treeInterpolation(root=root, leaf=leaf_v, node=node_v)
        # (node + 1) * leaf * d
        upward_ca_vec = upward_ca(interpolation_vec)
        node_hat = self.weightAgg(root, leaf, upward_ca_vec)
        leaf_hat = leaf_v
        # Mix node and leaf values using the concatenated score matrices.
        # NOTE(review): softmax over dim=-2 here — confirm this is the
        # intended normalization axis.
        Attn = torch.matmul(
            F.softmax(torch.cat((Ann, Anl), dim=-1), dim=-2),
            torch.cat((node_hat, leaf_hat), dim=-2),
        )
        Attl = torch.matmul(
            F.softmax(torch.cat((Aln, All), dim=-1), dim=-2),
            torch.cat((node_hat, leaf_hat), dim=-2),
        )
        return Attn, Attl


class Reshape(nn.Module):
    # Linear + ReLU projection mapping feature dim -> model dim.
    def __init__(self, d_feature, d_model):
        super(Reshape, self).__init__()
        self.reshape = nn.Sequential(nn.Linear(d_feature, d_model), nn.ReLU())

    def forward(self, x):
        return self.reshape(x)


class EncoderLayer(nn.Module):
    # One encoder layer: tree attention, output projection (Wo), residual
    # norms, and a feed-forward bottleneck.
    def __init__(self, d_feature, d_model, d_ff):
        super(EncoderLayer, self).__init__()
        # self.reshape = nn.Linear(d_feature, d_model)
        self.treeattn = TreeAttention(d_feature, d_model)
        # Wo: shared output projection for node and leaf attention outputs.
        self.linear = nn.Linear(d_model, d_model)
        # self.reshape = Reshape(d_feature, d_model)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.feed_forward = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.ReLU(),
            nn.Linear(d_ff, d_ff // 2),
            nn.ReLU(),
            nn.Linear(d_ff // 2, d_model),
            nn.ReLU()
        )

    def forward(self, root, node, leaf):
        Attn, Attl = self.treeattn(root, node, leaf)
        Attno, Attlo = self.linear(Attn), self.linear(Attl)
        # Residual connections around the attention output...
        node_x = node + self.norm1(Attno)
        leaf_x = leaf + self.norm2(Attlo)
        # ...and around the feed-forward sub-layer.
        feed_node_x = self.feed_forward(node_x)
        feed_leaf_x = self.feed_forward(leaf_x)
        node_x = node_x + self.norm2(feed_node_x)
        leaf_x = leaf_x + self.norm2(feed_leaf_x)
        return node_x, leaf_x


class Encoder(nn.Module):
    # Full encoder: a first layer operating in feature space, a Reshape into
    # model space, then N-1 stacked layers, max-pooling, and a scalar head.
    def __init__(self, d_feature, d_model, d_ff, N):
        super(Encoder, self).__init__()
        self.reshape = Reshape(d_feature=d_feature, d_model=d_model)
        # First layer keeps d_feature as its model size so it can consume the
        # raw node/leaf matrices before the Reshape.
        self.firstEncoder = EncoderLayer(d_feature=d_feature, d_model=d_feature, d_ff=d_model)
        self.layers = clones(
            EncoderLayer(d_feature=d_model, d_model=d_model, d_ff=d_ff), N=N - 1
        )
        self.forward_net = nn.Sequential(
            nn.Linear(d_model, 1),
            nn.ReLU(),
        )

    def forward(self, root, node, leaf):
        # node = self.reshape(node)
        # leaf = self.reshape(leaf)
        node, leaf = self.firstEncoder(root, node, leaf)
        node, leaf = self.reshape(node), self.reshape(leaf)
        for layer in self.layers:
            node, leaf = layer(root, node, leaf)
        # Concatenate nodes and leaves, max-pool over them, predict a scalar.
        x = torch.cat((node, leaf), dim=-2)
        # max pool
        x = torch.max(x, dim=-2, keepdim=True)[0]
        x = self.forward_net(x)
        return x.squeeze(-1)


if __name__ == "__main__":
    # Smoke test on one sample of the cardinality-estimation dataset.
    encoder = Encoder(d_feature=9 + 6 + 64, d_model=512, d_ff=512, N=2).double()
    dataset = PlanDataset(root_dir="data/deep_cardinality")
    tree, nodemat, leafmat, label = dataset[51]
    print(nodemat.shape, leafmat.shape)
    x = encoder(tree, nodemat.double(), leafmat.double())
    print(x)
/train.py
import math
from model.encoder import Encoder
from util.dataset import PlanDataset
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from torchsummary import summary

# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Module-level training setup: dataset, 80/20 split, model, loss, optimizer.
dataset = PlanDataset(root_dir="data/deep_cardinality")
dataloader = DataLoader(dataset, batch_size=1, shuffle=True)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
# train_temp = [dataset[i] for i in range(10)]
# test_temp = [dataset[i] for i in range(5)]
train_dataset, test_dataset = random_split(dataset, [train_size, test_size])
# train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=2)
# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=2)
encoder = Encoder(d_feature=9 + 6 + 64, d_model=256, d_ff=128, N=4).double()
summary(encoder)
criterion = nn.MSELoss()
optimizer = optim.Adam(encoder.parameters(), lr=0.001)
epoch_size = 2


def train():
    """Train for epoch_size epochs, evaluating after each.

    Returns the (label, prediction) pairs collected during the final
    epoch's evaluation pass.
    """
    result = []
    for epoch in range(epoch_size):
        print("epoch : ", epoch)
        running_loss = 0.0
        for i, data in enumerate(train_dataset):
            tree, nodemat, leafmat, label = data
            optimizer.zero_grad()
            output = encoder(tree, nodemat.double(), leafmat.double())
            # output = output
            if len(output.shape) > 1 or len(label.shape) > 1:
                # Diagnostic: flag unexpected extra dimensions before MSE.
                print("output: {} ,label: {}".format(len(output.shape), len(label.shape)))
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if math.isnan(running_loss):
                print("nan: ", i, "\t", running_loss)
            if i % 200 == 0 and i != 0:
                # Report average loss over the last 200 samples, then reset.
                print("[%d, %5d] loss: %4f" % (epoch + 1, i + 1, running_loss / 200))
                running_loss = 0.0
        test_loss = 0.0
        with torch.no_grad():
            for i, data in enumerate(test_dataset):
                tree, nodemat, leafmat, label = data
                # NOTE(review): unlike the training pass, nodemat/leafmat are
                # not cast with .double() here — confirm dtypes already match.
                test_output = encoder(tree, nodemat, leafmat)
                if epoch == epoch_size - 1:
                    result.append((label, test_output))
                loss = criterion(test_output, label)
                test_loss += loss.item()
                if i % 200 == 0 and i != 0:
                    # NOTE(review): divides the running sum by the full
                    # test_size even though only i samples have been seen.
                    print("test loss: ", test_loss / test_size)
    return result


def dataset_test():
    # Debug helper: dump labels of the test split.
    for i, data in enumerate(test_dataset):
        tree, nodemat, leafmat, label = data
        print(label)


if __name__ == "__main__":
    result = train()
    # result = [(1.1, 2.2), (3.3, 4.4), (5.5, 6.6)]
    with open("data/dmodel256/resutldeep_cv1.0dff128-e2-N4-lr0.001.txt", "w") as f:
        f.write("\n".join("{} {}".format(x[0].item(), x[1].item()) for x in result))
    # torch.save(encoder, "model_parameter/encoderv1.0.pkl")
    # dataset_test()
/util/__init__.py
__all__=[ 'plan_to_tree', 'prase_tree2node_leaf' ]
/util/dataset.py
import time
import copy
import math
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import os
import sys
from torch.utils.data import Dataset, DataLoader

sys.path.append(os.path.abspath(os.getcwd()))
# print(sys.path)
from util.plan_to_tree import Node, parse_dep_tree_text, tree_feature_label
from util.prase_tree2node_leaf import tree2NodeLeafmat


class PlanDataset(Dataset):
    """Dataset of parsed EXPLAIN ANALYZE plan trees.

    Each item is (root Node, node feature matrix, leaf feature matrix,
    log-scaled label tensor of shape (1,)).
    """

    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        # Parse every plan file under root_dir into a tree of Nodes.
        self.planTrees, self.maxchild = parse_dep_tree_text(folder_name=root_dir)
        # (tree, label) pairs; tree_feature_label also trims feature vectors.
        self.trees_labels = [tree_feature_label(i) for i in self.planTrees]
        self.transform = transform

    def __len__(self):
        return len(self.planTrees)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # root + label
        tree, label = self.trees_labels[idx]
        nodemat, leafmat = tree2NodeLeafmat(tree)
        return (tree, nodemat, leafmat, torch.tensor(label, dtype=torch.double).reshape((1)))


def remove_signle_tree(root_dir, target_dir):
    """Copy only multi-node plans (trees with children) into target_dir.

    NOTE: the name's "signle" typo is historical; kept for callers.
    """
    planTrees, _ = parse_dep_tree_text(folder_name=root_dir)
    plan_dir = sorted(os.listdir(root_dir))
    for dir_name, tree in zip(plan_dir, planTrees):
        if tree.children:
            with open(os.path.join(root_dir, dir_name), "r") as read_f:
                lines = read_f.readlines()
            with open(os.path.join(target_dir, dir_name), "w") as write_f:
                write_f.writelines(lines)


def test_label():
    # Debug helper: print every label and flag NaN/inf values.
    dataset = PlanDataset(root_dir="/home/jitao/hierarchical_attention/data/deep_plan")
    for i, data in enumerate(dataset):
        tree, nodemat, leafmat, label = data
        # print(label.shape)
        print(label)
        if np.isnan(label.numpy()):
            print("nan:", i)
        if np.isinf(label.numpy()):
            print("inf", i)


if __name__ == "__main__":
    remove_signle_tree(
        # root_dir="/data1/jitao/dataset/cardinality/all_plan",
        root_dir="/home/jitao/hierarchical_attention/data/cardinality",
        target_dir="/home/jitao/hierarchical_attention/data/deep_cardinality",
    )
    # pass
    # data = PlanDataset(root_dir="data/data2")
    # test_label()
/util/plan_to_tree.py
import os
import numpy as np

# One-hot vocabulary of plan operators (order defines the one-hot index).
operators = [
    "Merge Join",
    "Hash",
    "Index Only Scan using title_pkey on title t",
    "Sort",
    "Seq Scan",
    "Index Scan using title_pkey on title t",
    "Materialize",
    "Nested Loop",
    "Hash Join",
]
# Join/sort columns recognized in condition lines.
columns = [
    "ci.movie_id",
    "t.id",
    "mi_idx.movie_id",
    "mi.movie_id",
    "mc.movie_id",
    "mk.movie_id",
]
# Precomputed per-scan feature vectors, indexed by scan order of appearance.
scan_features = np.load("/home/jitao/hierarchical_attention/model_parameter/featuer_deep_cardinality.npy")


def extract_time(line):
    """Parse (cost=a..b rows=r width=w) (actual time=x..y rows=n ...) from one
    EXPLAIN ANALYZE line and return the seven values as floats.

    NOTE(review): the first split separator is assumed to be two spaces (the
    gap EXPLAIN prints before "(cost="); confirm against the plan files.
    """
    data = line.replace("->", "").lstrip().split("  ")[-1].split(" ")
    start_cost = data[0].split("..")[0].replace("(cost=", "")
    end_cost = data[0].split("..")[1]
    rows = data[1].replace("rows=", "")
    width = data[2].replace("width=", "").replace(")", "")
    a_start_cost = data[4].split("..")[0].replace("time=", "")
    a_end_cost = data[4].split("..")[1]
    a_rows = data[5].replace("rows=", "")
    return (
        float(start_cost),
        float(end_cost),
        float(rows),
        float(width),
        float(a_start_cost),
        float(a_end_cost),
        float(a_rows),
    )


def extract_operator(line):
    """Return (operator name, whether it is in the known vocabulary).

    All "Seq Scan on <table>" variants collapse to plain "Seq Scan".
    """
    operator = line.replace("->", "").lstrip().split("  ")[0]
    if operator.startswith("Seq Scan"):
        operator = "Seq Scan"
    return operator, operator in operators


def extract_attributes(operator, line, feature_vec, i=None):
    """Set column one-hots (and, for Seq Scan, precomputed scan features)
    into feature_vec in place, based on one detail line of the plan.

    `i` is the running Seq Scan counter indexing into scan_features.
    """
    # Local copies shadow the module-level lists (same contents).
    operators = [
        "Merge Join",
        "Hash",
        "Index Only Scan using title_pkey on title t",
        "Sort",
        "Seq Scan",
        "Index Scan using title_pkey on title t",
        "Materialize",
        "Nested Loop",
        "Hash Join",
    ]
    columns = [
        "ci.movie_id",
        "t.id",
        "mi_idx.movie_id",
        "mi.movie_id",
        "mc.movie_id",
        "mk.movie_id",
    ]
    operators_count = len(operators)  # 9
    if operator in ["Hash", "Materialize", "Nested Loop"]:
        pass
    elif operator == "Merge Join":
        if "Cond" in line:
            for column in columns:
                if column in line:
                    feature_vec[columns.index(column) + operators_count] = 1.0
    elif operator == "Index Only Scan using title_pkey on title t":
        # feature_vec[15:56] = scan_features[i]
        if "Cond" in line:
            # The index is on t.id, so that column is always implicated.
            feature_vec[columns.index("t.id") + operators_count] = 1.0
            for column in columns:
                if column in line:
                    feature_vec[columns.index(column) + operators_count] = 1.0
    elif operator == "Sort":
        for column in columns:
            if column in line:
                feature_vec[columns.index(column) + operators_count] = 1.0
    elif operator == "Index Scan using title_pkey on title t":
        # feature_vec[15:56] = scan_features[i]
        if "Cond" in line:
            feature_vec[columns.index("t.id") + operators_count] = 1.0
            for column in columns:
                if column in line:
                    feature_vec[columns.index(column) + operators_count] = 1.0
    elif operator == "Hash Join":
        if "Cond" in line:
            for column in columns:
                if column in line:
                    feature_vec[columns.index(column) + operators_count] = 1.0
    elif operator == "Seq Scan":
        feature_vec[15:79] = scan_features[i]  # 64


"""Tree node class"""


class Node(object):
    # A plan-tree node: `data` is the feature vector, `index` a unique id
    # assigned later by add_node_index.
    def __init__(self, data, parent=None, index=-1):
        self.data = data
        self.children = []
        self.parent = parent
        self.index = index

    def add_child(self, obj):
        self.children.append(obj)

    def add_parent(self, obj):
        self.parent = obj

    def __str__(self, tabs=0):
        tab_spaces = str.join("", [" " for i in range(tabs)])
        return (
            tab_spaces
            + "+-- Node: "
            + str.join("|", self.data)
            + "\n"
            + str.join("\n", [child.__str__(tabs + 2) for child in self.children])
        )


def parse_dep_tree_text(folder_name="data"):
    """Parse every EXPLAIN ANALYZE file in folder_name into a Node tree.

    Features per node: 9 operator one-hots + 6 column one-hots + 7 cost/time
    values + 64 scan features. Indentation of "->" markers drives the
    parent/child stack. Returns (list of root Nodes, max children count).
    """
    scan_cnt = 0
    max_children = 0
    plan_trees = []
    feature_len = 9 + 6 + 7 + 64
    for each_plan in sorted(os.listdir(folder_name)):
        # print(each_plan)
        with open(os.path.join(folder_name, each_plan), "r") as f:
            lines = f.readlines()
            feature_vec = [0.0] * feature_len
            operator, in_operators = extract_operator(lines[0])
            if not in_operators:
                # First line was a header; the real root is on line 1.
                operator, in_operators = extract_operator(lines[1])
                start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                    lines[1]
                )
                j = 2
            else:
                start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                    lines[0]
                )
                j = 1
            # Last 7 slots hold the cost/row statistics.
            feature_vec[feature_len - 7: feature_len] = [
                start_cost,
                end_cost,
                rows,
                width,
                a_start_cost,
                a_end_cost,
                a_rows,
            ]
            feature_vec[operators.index(operator)] = 1.0
            if operator == "Seq Scan":
                # Single Seq Scan root: whole plan is one node.
                extract_attributes(operator, lines[j], feature_vec, scan_cnt)
                scan_cnt += 1
                root_tokens = feature_vec
                current_node = Node(root_tokens)
                plan_trees.append(current_node)
                continue
            else:
                # Consume detail lines until the next plan-node line.
                while "actual" not in lines[j] and "Plan" not in lines[j]:
                    extract_attributes(operator, lines[j], feature_vec)
                    j += 1
            root_tokens = feature_vec  # all of them?
            current_node = Node(root_tokens)
            plan_trees.append(current_node)
            spaces = 0
            node_stack = []
            i = j
            while not lines[i].startswith("Planning time"):
                line = lines[i]
                i += 1
                if line.startswith("Planning time") or line.startswith(
                    "Execution time"
                ):
                    break
                elif line.strip() == "":
                    break
                elif "->" not in line:
                    continue
                else:
                    if line.index("->") < spaces:
                        # De-indent: pop back up to the matching ancestor.
                        while line.index("->") < spaces:
                            current_node, spaces = node_stack.pop()
                    if line.index("->") > spaces:
                        # Deeper indent: child of current_node.
                        line_copy = line
                        feature_vec = [0.0] * feature_len
                        start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                            line_copy
                        )
                        feature_vec[feature_len - 7: feature_len] = [
                            start_cost,
                            end_cost,
                            rows,
                            width,
                            a_start_cost,
                            a_end_cost,
                            a_rows,
                        ]
                        operator, in_operators = extract_operator(line_copy)
                        feature_vec[operators.index(operator)] = 1.0
                        if operator == "Seq Scan":
                            extract_attributes(
                                operator, line_copy, feature_vec, scan_cnt
                            )
                            scan_cnt += 1
                        else:
                            j = 0
                            while (
                                "actual" not in lines[i + j]
                                and "Plan" not in lines[i + j]
                            ):
                                extract_attributes(operator, lines[i + j], feature_vec)
                                j += 1
                        tokens = feature_vec
                        new_node = Node(tokens, parent=current_node)
                        current_node.add_child(new_node)
                        if len(current_node.children) > max_children:
                            max_children = len(current_node.children)
                        node_stack.append((current_node, spaces))
                        current_node = new_node
                        spaces = line.index("->")
                    elif line.index("->") == spaces:
                        # Same indent: sibling, attach to node on stack top.
                        line_copy = line
                        feature_vec = [0.0] * feature_len
                        start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                            line_copy
                        )
                        feature_vec[feature_len - 7: feature_len] = [
                            start_cost,
                            end_cost,
                            rows,
                            width,
                            a_start_cost,
                            a_end_cost,
                            a_rows,
                        ]
                        operator, in_operators = extract_operator(line_copy)
                        feature_vec[operators.index(operator)] = 1.0
                        if operator == "Seq Scan":
                            extract_attributes(
                                operator, line_copy, feature_vec, scan_cnt
                            )
                            scan_cnt += 1
                        else:
                            j = 0
                            while (
                                "actual" not in lines[i + j]
                                and "Plan" not in lines[i + j]
                            ):
                                extract_attributes(operator, lines[i + j], feature_vec)
                                j += 1
                        tokens = feature_vec
                        new_node = Node(tokens, parent=node_stack[-1][0])
                        node_stack[-1][0].add_child(new_node)
                        if len(node_stack[-1][0].children) > max_children:
                            max_children = len(node_stack[-1][0].children)
                        current_node = new_node
                        spaces = line.index("->")
            # break
    # print(scan_cnt)
    return plan_trees, max_children  # a list of the roots nodes


def parse_dep_tree_text_lb_ub(folder_name="data/"):
    """Variant of parse_dep_tree_text with 32 scan features instead of 64.

    NOTE(review): near-duplicate of parse_dep_tree_text — candidates for
    consolidation behind a feature_len parameter.
    """
    scan_cnt = 0
    max_children = 0
    plan_trees = []
    feature_len = 9 + 6 + 7 + 32
    for each_plan in sorted(os.listdir(folder_name)):
        # print(each_plan)
        with open(os.path.join(folder_name, each_plan), "r") as f:
            lines = f.readlines()
            feature_vec = [0.0] * feature_len
            operator, in_operators = extract_operator(lines[0])
            if not in_operators:
                operator, in_operators = extract_operator(lines[1])
                start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                    lines[1]
                )
                j = 2
            else:
                start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                    lines[0]
                )
                j = 1
            feature_vec[feature_len - 7: feature_len] = [
                start_cost,
                end_cost,
                rows,
                width,
                a_start_cost,
                a_end_cost,
                a_rows,
            ]
            feature_vec[operators.index(operator)] = 1.0
            if operator == "Seq Scan":
                extract_attributes(operator, lines[j], feature_vec, scan_cnt)
                scan_cnt += 1
                root_tokens = feature_vec
                current_node = Node(root_tokens)
                plan_trees.append(current_node)
                continue
            else:
                while "actual" not in lines[j] and "Plan" not in lines[j]:
                    extract_attributes(operator, lines[j], feature_vec)
                    j += 1
            root_tokens = feature_vec  # all of them?
            current_node = Node(root_tokens)
            plan_trees.append(current_node)
            spaces = 0
            node_stack = []
            i = j
            while not lines[i].startswith("Planning time"):
                line = lines[i]
                i += 1
                if line.startswith("Planning time") or line.startswith(
                    "Execution time"
                ):
                    break
                elif line.strip() == "":
                    break
                elif "->" not in line:
                    continue
                else:
                    if line.index("->") < spaces:
                        while line.index("->") < spaces:
                            current_node, spaces = node_stack.pop()
                    if line.index("->") > spaces:
                        line_copy = line
                        feature_vec = [0.0] * feature_len
                        start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                            line_copy
                        )
                        feature_vec[feature_len - 7: feature_len] = [
                            start_cost,
                            end_cost,
                            rows,
                            width,
                            a_start_cost,
                            a_end_cost,
                            a_rows,
                        ]
                        operator, in_operators = extract_operator(line_copy)
                        feature_vec[operators.index(operator)] = 1.0
                        if operator == "Seq Scan":
                            # if(operator == "Seq Scan" or operator == "Index Only Scan using title_pkey on title t"
                            # or operator=='Index Scan using title_pkey on title t'):
                            extract_attributes(
                                operator, line_copy, feature_vec, scan_cnt
                            )
                            scan_cnt += 1
                        else:
                            j = 0
                            while (
                                "actual" not in lines[i + j]
                                and "Plan" not in lines[i + j]
                            ):
                                extract_attributes(operator, lines[i + j], feature_vec)
                                j += 1
                        tokens = feature_vec
                        new_node = Node(tokens, parent=current_node)
                        current_node.add_child(new_node)
                        if len(current_node.children) > max_children:
                            max_children = len(current_node.children)
                        node_stack.append((current_node, spaces))
                        current_node = new_node
                        spaces = line.index("->")
                    elif line.index("->") == spaces:
                        line_copy = line
                        feature_vec = [0.0] * feature_len
                        start_cost, end_cost, rows, width, a_start_cost, a_end_cost, a_rows = extract_time(
                            line_copy
                        )
                        feature_vec[feature_len - 7: feature_len] = [
                            start_cost,
                            end_cost,
                            rows,
                            width,
                            a_start_cost,
                            a_end_cost,
                            a_rows,
                        ]
                        operator, in_operators = extract_operator(line_copy)
                        feature_vec[operators.index(operator)] = 1.0
                        if operator == "Seq Scan":
                            # if(operator == "Seq Scan" or operator == "Index Only Scan using title_pkey on title t" or
                            # operator=='Index Scan using title_pkey on title t'):
                            extract_attributes(
                                operator, line_copy, feature_vec, scan_cnt
                            )
                            scan_cnt += 1
                        else:
                            j = 0
                            while (
                                "actual" not in lines[i + j]
                                and "Plan" not in lines[i + j]
                            ):
                                extract_attributes(operator, lines[i + j], feature_vec)
                                j += 1
                        tokens = feature_vec
                        new_node = Node(tokens, parent=node_stack[-1][0])
                        node_stack[-1][0].add_child(new_node)
                        if len(node_stack[-1][0].children) > max_children:
                            max_children = len(node_stack[-1][0].children)
                        current_node = new_node
                        spaces = line.index("->")
            # break
    # print(scan_cnt)
    return plan_trees, max_children  # a list of the roots nodes


def p2t(node):
    """Convert a Node tree to a nested dict with features/labels/pg keys."""
    # prediction to true cardinality
    # return float(start_cost),float(end_cost),float(rows),float(width),
    # float(a_start_cost),float(a_end_cost),float(a_rows)
    tree = {}
    tmp = node.data
    operators_count = 9
    columns_count = 6
    scan_features = 64
    assert len(tmp) == operators_count + columns_count + 7 + scan_features
    tree["features"] = tmp[: operators_count + columns_count + scan_features]
    # tree['features'].append(tmp[-5]) #with card as feature
    tree["features"].append(tmp[-1])  # with Actual card as feature
    # cardinality
    # tree['labels'] = np.log(node.data[-1]+1) #cardinality
    # tree['pg'] = np.log(node.data[-5]) # cost
    tree["labels"] = np.log(node.data[-2])  # cost
    tree["pg"] = np.log(node.data[-6])
    tree["children"] = []
    for children in node.children:
        tree["children"].append(p2t(children))
    return tree


def tree_feature_label(root: Node):
    """Strip the 7 cost/time stats from every node's features and return
    (tree, log label); the label is the root's actual row count."""
    label = root.data[-1]
    operators_count = 9
    columns_count = 6
    scan_features = 64
    feature_len = operators_count + columns_count + scan_features

    def feature(root: Node):
        # Truncate in place, recursively.
        root.data = root.data[:feature_len]
        if root.children:
            for child in root.children:
                feature(child)

    feature(root)
    return root, np.log(label) if label > 1 else label


if __name__ == "__main__":
    print(os.path.abspath("."))
    plan_tree, max_children = parse_dep_tree_text(folder_name="./data/deep_plan")
    # add_node_index(plan_tree[1])
    # leaf,node = test(plan_tree[1])
    print(len(plan_tree))
/util/prase_tree2node_leaf.py
from typing import List
from collections import deque
import copy
import numpy as np
import torch

from util.plan_to_tree import Node, parse_dep_tree_text


def add_node_index(root: Node) -> Node:
    # Assign a unique pre-order index to every node so ancestor
    # relationships can be tested by identity of indices.
    index = 1

    def add_index(root: Node):
        nonlocal index
        if not root:
            return -1
        root.index = index
        index += 1
        for child in root.children:
            add_index(child)

    add_index(root)
    return root


def is_ancestor(leaf: Node, node: Node) -> bool:
    # BFS from `node`; true if `leaf` appears in its subtree.
    node_queue = deque([node])
    while node_queue:
        cnt_node = node_queue.popleft()
        for child in cnt_node.children:
            node_queue.append(child)
            if child.index == leaf.index:
                return True
    return False


def parse_tree2leaves_node(root: Node):
    # Pre-order traversal split into (leaves, internal nodes).
    leaf = []
    node = []

    def plan_tree_leaves_node(root: Node):
        # return the tree leaves and node list
        if root.children:
            node.append(root)
            for child in root.children:
                plan_tree_leaves_node(child)
        else:
            leaf.append(root)

    plan_tree_leaves_node(root)
    return leaf, node


def treeInterpolation(root: Node, leaf, node):
    """Build a (tree_depth + 1, tree_width, d) grid: row `tree_depth` holds
    leaf vectors; row k holds node k's vector in every leaf column it is an
    ancestor of. Hierarchical position embeddings are added on top.
    """
    # global FEATURE_LEN
    add_node_index(root)
    feature_len = leaf.shape[-1]
    leaf_order, node_order = parse_tree2leaves_node(root=root)
    tree_depth = len(node_order)
    tree_width = len(leaf_order)
    interpolation_vec = torch.zeros((tree_depth + 1, tree_width, feature_len), dtype=torch.double)
    for leaf_index in range(tree_width):
        interpolation_vec[tree_depth][leaf_index] = leaf[leaf_index]
    for leaf_index in range(tree_width):
        for node_index in range(tree_depth):
            if is_ancestor(leaf=leaf_order[leaf_index], node=node_order[node_index]):
                interpolation_vec[node_index][leaf_index] = node[node_index]
    hierarchical_embeddings_vec = hierarchical_embeddings(
        root=root, leaf_order=leaf_order, node_order=node_order, feature_len=feature_len
    )
    # print(torch.nonzero(hierarchical_embeddings_vec))
    # test_upward(interpolation_vec)
    return interpolation_vec + hierarchical_embeddings_vec


def vertical_deepth(node: Node, leaf: Node) -> int:
    # BFS level distance from `node` down to `leaf` (1 = direct child).
    deepth = 0
    node_queue = deque([node])
    # size = len(node_queue)
    while node_queue:
        size = len(node_queue)
        deepth += 1
        while size:
            cnt_node = node_queue.popleft()
            size -= 1
            for child in cnt_node.children:
                node_queue.append(child)
                if child.index == leaf.index:
                    return deepth


def horizontal_width(root: Node) -> int:
    # Number of leaves under root; a lone root counts as its own leaf.
    # if only root it will return root
    leaf, _ = parse_tree2leaves_node(root=root)
    return len(leaf)


def hierarchical_embeddings(root: Node, leaf_order: List, node_order: List, feature_len: int):
    """Two-hot positional embedding: first half of the feature dim encodes
    the node→leaf depth, second half encodes the node's subtree width."""
    # global FEATURE_LEN
    tree_depth = len(node_order)
    tree_width = len(leaf_order)
    # feature_len =
    vertical_len = feature_len // 2
    horizontal_len = feature_len // 2
    hierarchical_emebdding_vec = torch.zeros(
        (tree_depth + 1, tree_width, feature_len), dtype=torch.double)
    for leaf_index in range(tree_width):
        for node_index in range(tree_depth):
            node = node_order[node_index]
            leaf = leaf_order[leaf_index]
            if is_ancestor(leaf=leaf, node=node):
                depth = vertical_deepth(node=node, leaf=leaf)
                width = horizontal_width(root=node)
                # need to check depth and width < horizonal_len
                assert depth < horizontal_len and width < vertical_len
                hierarchical_emebdding_vec[node_index][leaf_index][depth - 1] = 1.0
                hierarchical_emebdding_vec[node_index][leaf_index][horizontal_len + width - 1] = 1.0
    return hierarchical_emebdding_vec


def upward_ca(interpolation_vec):
    """For every non-empty (node, leaf) cell, average the leaf vector with all
    non-empty cells at or below that node on the same leaf column.

    Returns a (tree_depth - 1, tree_width, d) tensor (drops the leaf row).
    """
    interpolation_vec_cp = copy.copy(interpolation_vec)
    tree_depth, tree_width, feature_len = interpolation_vec.shape
    upward_ca_vec = torch.zeros((tree_depth - 1, tree_width, feature_len), dtype=torch.double)
    for leaf_index in range(tree_width):
        for node_index in range(tree_depth - 1):
            if interpolation_vec_cp[node_index][leaf_index].detach().numpy().any():
                # if(torch.is_nonzero(interpolation_vec[node_index][leaf_index])):
                num_not_null = 1
                # Start from the leaf row, then add every non-empty ancestor
                # cell between this node and the leaf.
                upward_ca_vec[node_index][leaf_index] = interpolation_vec[tree_depth - 1][leaf_index]
                for in_node_index in range(node_index, tree_depth - 1):
                    if interpolation_vec_cp[in_node_index][leaf_index].detach().numpy().any():
                        # if(torch.is_nonzero(interpolation_vec[in_node_index][leaf_index])):
                        upward_ca_vec[node_index][leaf_index] += interpolation_vec[in_node_index][leaf_index]
                        num_not_null += 1
                # print(num_not_null)
                upward_ca_vec[node_index][leaf_index] /= num_not_null
    # test_upward(upward_ca_vec)
    return upward_ca_vec


def weightedAggregationCoeffi(root: Node):
    # Per-node coefficient 1 / (tree_width + subtree size of node).
    leaf_order, node_order = parse_tree2leaves_node(root=root)
    tree_depth = len(node_order)
    tree_width = len(leaf_order)
    agg_coeffi = torch.zeros((tree_depth), dtype=torch.double)
    agg_coeffi += torch.tensor([tree_width], dtype=torch.double)
    leaves_nodes = [parse_tree2leaves_node(rot) for rot in node_order]
    tree_size = [len(leaves) + len(nodes) for leaves, nodes in leaves_nodes]
    agg_coeffi += torch.tensor(tree_size, dtype=torch.double)
    return 1 / agg_coeffi


# def weighted_aggregation(upward_ca_vec):
#     # upward ca vec with dim = node + 1 * leaf * d
#     dim = upward_ca_vec.shape[2]
#     no_zero = np.count_nonzero(upward_ca_vec, axis=(1, 2))/dim
#     upward_ca_sum = np.sum(upward_ca_vec, axis=1)
#     # no_zero * upward ca sum in each line
#     weighted_aggregation_vec = upward_ca_sum * np.expand_dims(no_zero, 1)
#     return weighted_aggregation_vec


def test_interpolation():
    # Visual check: print the ancestor/leaf incidence matrix of one tree.
    plan_tree, max_children = parse_dep_tree_text(folder_name="./data")
    add_node_index(plan_tree[1])
    leaf_order, node_order = parse_tree2leaves_node(root=plan_tree[1])
    tree_depth = len(node_order)
    tree_width = len(leaf_order)
    print(tree_depth, tree_width)
    test_interpolation = np.zeros((tree_depth, tree_width), dtype=np.double)
    for leaf_index in range(tree_width):
        for node_index in range(tree_depth):
            if is_ancestor(leaf=leaf_order[leaf_index], node=node_order[node_index]):
                test_interpolation[node_index][leaf_index] = 1
    print(test_interpolation)


def test_upward(upward_ca_vec):
    # Debug helper: show which (node, leaf) cells are non-empty.
    test_upward_vec = torch.sum(upward_ca_vec, dim=-1)
    print(torch.nonzero(test_upward_vec))


def tree2NodeLeafmat(root: Node):
    """Stack node/leaf feature vectors into two double tensors (pre-order)."""
    global FEATURE_LEN  # NOTE(review): FEATURE_LEN is never defined or read here
    leaf_order, node_order = parse_tree2leaves_node(root)
    node_mat = np.array([node.data for node in node_order], dtype=np.double)
    leaf_mat = np.array([leaf.data for leaf in leaf_order], dtype=np.double)
    nodemat, leafmat = (torch.from_numpy(node_mat).double(), torch.from_numpy(leaf_mat).double())
    return nodemat, leafmat


if __name__ == "__main__":
    # print(os.path.abspath('.'))
    plan_tree, max_children = parse_dep_tree_text(folder_name="./data")
    add_node_index(plan_tree[1])
    leaf_order, node_order = parse_tree2leaves_node(root=plan_tree[1])
/util/qerror.py
from typing import List
import numpy as np


def cal_q_error(predict, label, log=True):
    """Q-error of a cardinality estimate: ratio of the larger value to the
    smaller one (always >= 1).

    With ``log`` true (the default), both inputs are natural-log values and
    are exponentiated before the ratio is taken.
    """
    if log:
        predict, label = np.e ** predict, np.e ** label
    return predict / label if predict > label else label / predict


def print_qerror(q_error: List):
    """Print max/mean/median and tail-percentile statistics of q-errors."""
    reports = [
        ("max qerror: {:.4f}", max(q_error)),
        ("mean qerror: {:.4f}", np.mean(q_error)),
        ("media qerror: {:.4f}", np.median(q_error)),
        ("90th qerror: {:.4f}", np.percentile(q_error, 90)),
        ("95th qerror: {:.4f}", np.percentile(q_error, 95)),
        ("99th qerror: {:.4f}", np.percentile(q_error, 99)),
    ]
    for template, value in reports:
        print(template.format(value))
/util/result.py
import sys
import os
import numpy as np

sys.path.append(os.path.abspath(os.getcwd()))
from util.qerror import cal_q_error, print_qerror

# Load "<label> <prediction>" pairs written by train.py and report q-error stats.
with open("/home/jitao/hierarchical_attention/data/dmodel512/resutlv1.0-e10-N4-lr0.001.txt", 'r') as f:
    lines = f.readlines()
label_output = [line.split(' ') for line in lines]
label = [float(label) for label, _ in label_output]
output = [float(output) for _, output in label_output]
len(label)  # NOTE(review): no-op expression — likely a notebook leftover
qerror = [cal_q_error(predict, actually) for predict, actually in zip(output, label)]
print_qerror(q_error=qerror)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
RobinHeath-Albuquerque/robin_heath_RPSLS
refs/heads/main
{"/main.py": ["/Spock.py", "/Scissors.py", "/Lizard.py", "/Paper.py", "/Game.py", "/Players.py"], "/rpsls.py": ["/Computer.py", "/Game.py"], "/Computer.py": ["/Game.py", "/Players.py"], "/Human.py": ["/Players.py"]}
└── ├── Computer.py ├── Game.py ├── Human.py ├── Lizard.py ├── Paper.py ├── Players.py ├── Scissors.py ├── Spock.py ├── main.py └── rpsls.py
/Computer.py
from Players import Players
import random
from Game import Game, my_gestures


class Computer(Players):
    """Computer player that throws a random gesture.

    Bug fixes vs. the original:
      * ``self.choice = random.choice`` stored the bound function object
        instead of calling it — the "choice" printed was a function repr.
      * The ``choice`` constructor argument was silently ignored; it is now
        honored when provided (default None keeps the old call shape).
    """

    def __init__(self, choice=None):
        # Use the caller-supplied gesture if given, otherwise pick one at
        # random from the shared gesture list.
        self.choice = choice if choice is not None else random.choice(my_gestures)

    def make_gesture(self):
        # Announce this player's gesture.
        print(self.choice)


# Bug fix: instantiate the class — the original bound the class object itself
# (`computer = Computer`), so `computer` had no `choice` attribute.
computer = Computer()
/Game.py
from random import randrange, random


class Game:
    """Holds the gesture list and rule descriptions for one RPSLS game.

    Defaults were added so ``Game()`` works (main.py constructs it with no
    arguments); explicit arguments now take effect instead of being ignored
    as in the original.
    """

    def __init__(self, gestures=None, rules=None):
        self.name = ()
        self.gestures = gestures if gestures is not None else my_gestures
        self.rules = rules if rules is not None else my_rules


my_gestures = ['rock', 'Spock', 'paper', 'lizard', 'scissors']

# Bug fix: the original list was missing a comma after 'Rock crushes
# Scissors', so implicit string concatenation merged the first two rules
# into one and the list held 9 entries instead of 10.
my_rules = ['Rock crushes Scissors', 'Scissors cuts Paper', 'Paper covers Rock',
            'Rock crushes Lizard', 'Lizard poisons Spock', 'Spock smashes Scissors',
            'Scissors decapitates Lizard', 'Lizard eats Paper', 'Paper disproves Spock',
            'Spock vaporizes Rock']


def result(winner_result, player_choice, computer_choice, win=2, lose=2, tie=None):
    """Accumulate one round's outcome into the running totals.

    Bug fixes vs. the original:
      * the comparison used the function object ``result`` instead of the
        ``winner_result`` argument, so every round fell into the tie branch;
      * ``tie`` defaulted to None and ``None + 1`` raised TypeError;
      * the function returned the function object itself; it now returns the
        updated ``(win, lose, tie)`` counts.
    """
    if tie is None:
        tie = 0
    if winner_result == 'win':
        win += 1
    elif winner_result == 'lose':
        lose += 1
    else:
        tie += 1
    return win, lose, tie
/Human.py
from Players import Players


class Human(Players):
    # Human player.
    def make_gesture(self):
        # NOTE(review): neither Human nor Players ever sets a `gestures`
        # attribute, so this raises AttributeError when called — confirm the
        # intended source of the gesture list (Game.my_gestures?).
        print(self.gestures)


# NOTE(review): Players.__init__ requires a `types` argument, so these
# module-level constructions raise TypeError at import time; make_gesture()
# then fails as described above. This module cannot currently be imported.
playerOne = Human()
playerOne.make_gesture()
playerTwo = Human()
/Lizard.py
class Lizard:
    """The Lizard gesture: loses to Rock and Scissors."""

    def __init__(self):
        # Display name and the gestures that defeat this one.
        self.name, self.loses_to = 'Lizard', ['Rock', 'Scissors']
/Paper.py
class Paper:
    """The Paper gesture: loses to Scissors and Lizard."""

    def __init__(self):
        # Display name and the gestures that defeat this one.
        self.name, self.loses_to = 'Paper', ['Scissors', 'Lizard']
/Players.py
class Players:
    """Base class for game participants.

    Preserved quirk: the ``types`` constructor argument is accepted but
    ignored — every instance records the module-level ``my_players`` list,
    exactly as the original did.
    """

    def __init__(self, types):
        self.choice, self.types = '', my_players


my_players = ['human', 'computer']
/Scissors.py
class Scissors:
    """The Scissors gesture: loses to Rock and Spock."""

    def __init__(self):
        # Display name and the gestures that defeat this one.
        self.name, self.loses_to = 'Scissors', ['Rock', 'Spock']
/Spock.py
class Spock:
    """The Spock gesture: loses to Lizard and Paper."""

    def __init__(self):
        # Display name and the gestures that defeat this one.
        self.name, self.loses_to = 'Spock', ['Lizard', 'Paper']
/main.py
import RPSLS
from Game import Game
from Players import Players
from Lizard import Lizard
from Spock import Spock
from Paper import Paper
from Scissors import Scissors
from Rock import Rock

# NOTE(review): this entry point cannot run as written — no `RPSLS` module
# exists (the game loop lives in lowercase rpsls.py, which is a script, not a
# function) and there is no Rock.py/Rock class in the project, so the imports
# fail. `Game` also defines no `run_game` method in Game.py. Flagging rather
# than guessing the intended wiring.
if __name__ == '__main__':
    game = Game()
    game.run_game()
    RPSLS.rpsls("rock")
    RPSLS.rpsls("Spock")
    RPSLS.rpsls("paper")
    RPSLS.rpsls("lizard")
    RPSLS.rpsls("scissors")
/rpsls.py
import random
from Game import Game, my_rules, my_gestures
from Computer import Computer, computer
from unittest import result

# Interactive RPSLS game loop.
#
# Bug fixes vs. the original:
#  * win/lose tests used `playerOne == "rock" or "Spock"`, which is always
#    truthy (the string literal is the `or` operand), so scoring was wrong;
#  * the `while` condition joined the two score checks with `or`, so the game
#    only ended once BOTH players reached the limit (effectively never);
#  * rounds where the computer threw lizard or Spock were never scored at all;
#  * the winner announcement sat in `elif` branches of the per-round scoring
#    chain and was unreachable; it now runs after the loop;
#  * player input is lowercased, so it could never equal 'Spock'; all
#    comparisons are now done on lowercased gestures.

# What each gesture defeats, keyed and valued in lowercase
# (derived from my_rules in Game.py).
beats = {
    'rock': {'scissors', 'lizard'},
    'paper': {'rock', 'spock'},
    'scissors': {'paper', 'lizard'},
    'lizard': {'spock', 'paper'},
    'spock': {'scissors', 'rock'},
}

player_name = input('Please enter your name:')
print('Hello, ' + player_name + '. Good luck!')
print()
print('Here are the rules:')
for rule in my_rules:
    print(rule)
print()
print('The best of 3 will win the game!')
print()

playerOne_score = 0
computer_score = 0
score_limit = 5

# Play rounds until either side reaches the limit.
while playerOne_score != score_limit and computer_score != score_limit:
    playerOne = input("Please enter your gesture:").lower()
    if playerOne not in beats:
        print("Unknown gesture, try one of:", my_gestures)
        continue
    computer_move = random.choice(my_gestures)
    print("The computer chooses", computer_move)
    computer_gesture = computer_move.lower()
    if computer_gesture == playerOne:
        print("Tie!!")
    elif playerOne in beats[computer_gesture]:
        print("The computer scores")
        computer_score = computer_score + 1
        print("The computers score is:", computer_score)
    else:
        print(player_name + " scores")
        playerOne_score = playerOne_score + 1
        print("Your score is:", playerOne_score)

if playerOne_score == score_limit:
    print("Congrats! You won!")
else:
    print("The computer won, better luck next time")
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
qcscine/conan-recipes
refs/heads/master
{"/artifactory-scripts/delete-old-pkgs.py": ["/artifactory-scripts/artifactory_api.py"]}
└── ├── artifactory-scripts │ ├── artifactory_api.py │ ├── delete-old-pkgs.py │ └── rehost.py ├── irc │ └── conanfile.py ├── mongo-cxx-driver │ └── conanfile.py ├── nauty │ └── conanfile.py ├── serenity │ └── conanfile.py └── xtb └── conanfile.py
/artifactory-scripts/artifactory_api.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ import pytz import requests import sys from datetime import datetime, timedelta class ArtifactoryAPI(object): def __init__(self, api_base, auth): """ Initialize API class with base URI and authentication tuple """ self.api_base = api_base self.auth = auth def api(self, path): return "/".join([self.api_base, path]) def get_conan_repositories(self): """ Fetch list of conan repositories in the artifactory """ api_path = self.api("api/repositories?type=local&packageType=conan") r = requests.get(api_path, auth=self.auth) if not r.ok: msg = "Got response {} for GET {}".format(r.status_code, api_path) raise RuntimeError(msg) return [d["key"] for d in r.json()] def get_path_list_base(self, repo, path): """ Base fn for directory listings """ api_path = self.api("api/storage/{}/{}".format(repo, path)) r = requests.get(api_path, auth=self.auth) if not r.ok: msg = "Got response {} for GET {}".format(r.status_code, api_path) raise RuntimeError(msg) return r.json() def get_subdirectories(self, repo, path): """ Get list of subdirectories for a path """ listing = self.get_path_list_base(repo, path) return [d["uri"].lstrip("/") for d in listing["children"] if d["folder"]] def get_last_updated(self, repo, path): """ Get date object for last updated time of a path """ # Only in Python 3.7 onwards does %z correclty interpret +01:00 suffix # to date format if sys.version_info.major >= 3 and sys.version_info.minor >= 7: date_str = self.get_path_list_base(repo, path)["lastUpdated"] date_format_str = "%Y-%m-%dT%H:%M:%S.%f%z" return datetime.strptime(date_str, date_format_str) date_str = self.get_path_list_base(repo, path)["lastUpdated"] plus_idx = date_str.index("+") date_str = date_str[:plus_idx] date_format_str = "%Y-%m-%dT%H:%M:%S.%f" return datetime.strptime(date_str, date_format_str) def older_than(self, delta, 
repo, path): """ Returns if a package is older than a supplied time delta """ last_updated = self.get_last_updated(repo, path) if sys.version_info.major >= 3 and sys.version_info.minor >= 7: now = datetime.now(pytz.utc) else: now = datetime.now() return (now - last_updated) > delta def delete(self, repo, path): """ Deletes a path from the artifactory """ api_path = self.api("{}/{}".format(repo, path)) r = requests.delete(api_path, auth=self.auth) if not r.ok: raise RuntimeError( "Got response {} for DELETE {}".format(r.status_code, api_path) ) def list_old_packages(self, repo, user, old_package_delta=timedelta(days=1), preserve_channels=["stable", "master"], preserve_newest=3): """ Lists old packages that could be deleted Descends along user-name-version-channel folder hierarchy. In each channel folder: - If a channel is in preserve_channels, skips the channel - Sorts packages by age - Preserves newest packages as specified by preserve_newest - Selects packages older than old_package_delta Returns a list of full paths to package directories """ if sys.version_info.major >= 3 and sys.version_info.minor >= 7: now = datetime.now(pytz.utc) else: now = datetime.now() old_packages = [] for name in self.get_subdirectories(repo, user): inc_path = "/".join([user, name]) for version in self.get_subdirectories(repo, inc_path): inc_path = "/".join([user, name, version]) for channel in self.get_subdirectories(repo, inc_path): if channel in preserve_channels: continue pkgs_path = "/".join([user, name, version, channel]) def lookup_age(pkg): return self.get_last_updated(repo, "/".join([pkgs_path, pkg])) pkgs = self.get_subdirectories(repo, pkgs_path) # Sort packages by date created (newer first, older last) sorted_pkgs = sorted(pkgs, key=lookup_age, reverse=True) # Always preserve n newest packages non_preserved = sorted_pkgs[preserve_newest:] # Select packages older than set delta older_pkgs = [pkg for pkg in non_preserved if now - lookup_age(pkg) > old_package_delta] # Add to 
old packages old_packages.extend( ["/".join([pkgs_path, pkg]) for pkg in older_pkgs] ) return old_packages
/artifactory-scripts/delete-old-pkgs.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ from artifactory_api import ArtifactoryAPI import sys """ NOTE: - Conan package order: name/version@user/channel E.g. scine_molassembler/1.0.0@ci/develop - Artifactory storage order: user/name/version/channel E.g. ci/scine_molassembler/version/develop """ if __name__ == "__main__": if len(sys.argv) != 4: raise RuntimeError( "Supply the Artifactory repository URL (e.g. http://localhost:8082/artifactory/api/conan/scine-internal), a username and password as arguments") full_api = sys.argv[1] # Determine api base part splat = full_api.split("/") api_idx = splat.index("api") api_base = "/".join(splat[:api_idx]) repo_name = splat[-1] user = sys.argv[2] passw = sys.argv[3] api = ArtifactoryAPI(api_base=api_base, auth=(user, passw)) old_pkgs = api.list_old_packages(repo_name, "scine") for pkg in old_pkgs: print("{}: {}".format(pkg, api.get_last_updated(repo_name, pkg))) api.delete(repo_name, pkg)
/artifactory-scripts/rehost.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ from conans.client.command import Command, Conan import sys rehost_packages = [ "boost_assert/1.69.0@bincrafters/stable", "boost_base/1.69.0@bincrafters/stable", "boost_config/1.69.0@bincrafters/stable", "boost_container_hash/1.69.0@bincrafters/stable", "boost_core/1.69.0@bincrafters/stable", "boost_detail/1.69.0@bincrafters/stable", "boost_integer/1.69.0@bincrafters/stable", "boost_move/1.69.0@bincrafters/stable", "boost_optional/1.69.0@bincrafters/stable", "boost_predef/1.69.0@bincrafters/stable", "boost_preprocessor/1.69.0@bincrafters/stable", "boost_smart_ptr/1.69.0@bincrafters/stable", "boost_static_assert/1.69.0@bincrafters/stable", "boost_throw_exception/1.69.0@bincrafters/stable", "boost_type_traits/1.69.0@bincrafters/stable", "boost_utility/1.69.0@bincrafters/stable", "icu/63.1@bincrafters/stable", "sqlite3/3.27.2@bincrafters/stable", "sqlitecpp/2.4.0@bincrafters/stable", "yaml-cpp/0.6.3@_/_", "openssl/1.1.1g@_/_", "boost/1.71.0@conan/stable", "bzip2/1.0.8@conan/stable", "eigen/3.3.7@conan/stable", "lapack/3.7.1@conan/stable", "zlib/1.2.11@conan/stable", "gtest/1.10.0@_/_", "cmake/3.17.3@_/_" ] if __name__ == "__main__": target_remote = sys.argv[1] conan_api, _, _ = Conan.factory() # for pkg in rehost_packages: for pkg in rehost_packages: # Which remote should we be getting this from? 
remote = None if "bincrafters" in pkg: remote = "bincrafters" else: remote = "conan-center" install_args = ["install", "-r", remote, "--build=missing", pkg] print("conan {}".format(" ".join(install_args))) cmd = Command(conan_api) error = cmd.run(install_args) if error != 0: raise RuntimeError("Result is not zero, but {}".format(error)) upload_args = ["upload", "-r", target_remote, "--all", "-c", pkg] print("conan {}".format(" ".join(upload_args))) cmd = Command(conan_api) error = cmd.run(upload_args) if error != 0: raise RuntimeError("Result is not zero, but {}".format(error))
/irc/conanfile.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ from conans import ConanFile, CMake, tools import shutil class IrcConanfile(ConanFile): name = "irc" version = "6d5c7c37" description = "Internal Redundant Coordinates" topics = ("conan", "quantum-chemistry", "chemistry") url = "https://github.com/rmeli/irc" license = "MIT" exports_sources = "CMakeLists.txt" generators = "cmake" settings = "os", "compiler", "arch", "build_type" options = { "shared": [True, False], "fPIC": [True, False], "tests": [True, False] } default_options = {"shared": False, "fPIC": True, "tests": False} requires = [ "eigen/[~=3.3.7]@conan/stable", "boost/[>1.58.0]@conan/stable" ] def config_options(self): if self.settings.os == "Windows": del self.options.fPIC def source(self): # NOTE: Update this when release hits # remote = "https://github.com/RMeli/irc/archive/{}.tar.gz" # tools.get(remote.format(self.version)) # extracted_dir = self.name + "-" + self.version # os.rename(extracted_dir, "sources") # Use the master branch self.run("git clone https://github.com/rmeli/irc.git") self.run("cd irc && git checkout 6d5c7c372d02ecdbd50f8981669c46ddae0638ac") shutil.move("irc", "sources") def _configure_cmake(self): cmake = CMake(self) cmake.definitions["WITH_EIGEN"] = True cmake.definitions["BUILD_TESTS"] = self.options.tests cmake.configure() return cmake def build(self): cmake = self._configure_cmake() cmake.build() cmake.test() def package(self): self.copy(pattern="sources/LICENSE", dst="licenses", keep_path=False) cmake = self._configure_cmake() cmake.install() def package_id(self): # Remove test option from package id computation delattr(self.info.options, "tests") self.info.header_only() def package_info(self): pass
/mongo-cxx-driver/conanfile.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ from conans import ConanFile, CMake, tools from conans.errors import ConanInvalidConfiguration import os class MongoCxxConan(ConanFile): name = "mongo-cxx-driver" version = "3.4.0" url = "http://github.com/bincrafters/conan-mongo-cxx-driver" description = "C++ Driver for MongoDB" license = "Apache-2.0" settings = "os", "compiler", "arch", "build_type" options = { "shared": [True, False], "fPIC": [True, False], "polyfill": ["std", "boost", "mnmlstc", "experimental"] } default_options = {"shared": False, "fPIC": True, "polyfill": "boost"} requires = "mongo-c-driver/1.16.1@scine/stable" build_requires = "cmake/[>3.13.4]@scine/stable" exports = "link_dl.patch" generators = "cmake" _cmake = None _source_subfolder = "source_subfolder" _build_subfolder = "build_subfolder" def configure(self): if self.settings.compiler == 'Visual Studio' and self.options.polyfill != "boost": raise ConanInvalidConfiguration( "For MSVC, best to use the boost polyfill") tools.check_min_cppstd(self, "11") if self.options.polyfill == "std": tools.check_min_cppstd(self, "17") if self.options.polyfill == "boost": self.requires("boost_optional/1.69.0@bincrafters/stable") self.requires("boost_smart_ptr/1.69.0@bincrafters/stable") # Cannot model mnmlstc (not packaged, is pulled dynamically) or # std::experimental (how to check availability in stdlib?) 
polyfill # dependencies def source(self): remote = "https://github.com/mongodb/mongo-cxx-driver/archive/r{0}.tar.gz" tools.get(remote.format(self.version)) extracted_dir = "mongo-cxx-driver-r{0}".format(self.version) os.rename(extracted_dir, self._source_subfolder) # Mongo-c-driver does not cleanly handle its dependencies # Neither the newer autoconfigured config files nor the deprecated old # ones properly specify static library dependencies (like dl for # openssl) so we add it here. if tools.os_info.is_linux: path = os.path.join(self._source_subfolder, "src", "mongocxx") tools.patch(base_path=path, patch_file="link_dl.patch") def _configure_cmake(self): if self._cmake: return self._cmake self._cmake = CMake(self) self._cmake.definitions["BSONCXX_POLY_USE_MNMLSTC"] = self.options.polyfill == "mnmlstc" self._cmake.definitions["BSONCXX_POLY_USE_STD_EXPERIMENTAL"] = self.options.polyfill == "experimental" self._cmake.definitions["BSONCXX_POLY_USE_BOOST"] = self.options.polyfill == "boost" if tools.os_info.is_linux: self._cmake.definitions["CMAKE_MODULE_LINKER_FLAGS"] = "-ldl" self._cmake.definitions["CMAKE_EXE_LINKER_FLAGS"] = "-ldl" self._cmake.configure(source_dir=self._source_subfolder) return self._cmake def build(self): conan_magic_lines = '''project(MONGO_CXX_DRIVER LANGUAGES CXX) include(../conanbuildinfo.cmake) conan_basic_setup() ''' if self.settings.compiler == "Visual Studio": conan_magic_lines += "add_definitions(-D_ENABLE_EXTENDED_ALIGNED_STORAGE)" cmake_file = os.path.join(self._source_subfolder, "CMakeLists.txt") tools.replace_in_file( cmake_file, "project(MONGO_CXX_DRIVER LANGUAGES CXX)", conan_magic_lines) cmake = self._configure_cmake() cmake.build() def package(self): cmake = self._configure_cmake() cmake.install() def package_info(self): # Need to ensure mongocxx is linked before bsoncxx self.cpp_info.libs = sorted(tools.collect_libs(self), reverse=True) self.cpp_info.includedirs.extend( [os.path.join("include", x, "v_noabi") for x in ["bsoncxx", 
"mongocxx"]]) if self.options.polyfill == "mnmlstc": self.cpp_info.includedirs.append(os.path.join( "include", "bsoncxx", "third_party", "mnmlstc")) if not self.options.shared: self.cpp_info.defines.extend(["BSONCXX_STATIC", "MONGOCXX_STATIC"])
/nauty/conanfile.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ import os from conans import ConanFile, CMake, tools class NautyConanfile(ConanFile): name = "nauty" version = "2.7r1" description = "Graph Canonical Labeling and Automorphism Group Computation" topics = ("conan", "math", "graph") url = "http://pallini.di.uniroma1.it" license = "Apache-2.0" exports_sources = [ "CMakeLists.txt", "config.cmake.in" ] generators = "cmake" settings = "os", "compiler", "arch", "build_type" options = { "shared": [True, False], "fPIC": [True, False] } default_options = {"shared": False, "fPIC": True} def config_options(self): if self.settings.os == "Windows": del self.options.fPIC def source(self): remote = "http://pallini.di.uniroma1.it/nauty27r1.tar.gz" tools.get(remote) os.rename("nauty27r1", "sources") def _configure_cmake(self): cmake = CMake(self) cmake.configure() return cmake def build(self): cmake = self._configure_cmake() cmake.build() def package(self): self.copy(pattern="sources/COPYRIGHT", dst="licenses", keep_path=False) cmake = self._configure_cmake() cmake.install()
/serenity/conanfile.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ import os from conans import ConanFile, CMake, tools import shutil def microarch(conanfile): """ Determine microarch of compiler and os (CPU chipset family or ISA) """ cmdlist = None regex = None # Note that it doesn't matter if gcc or clang have different names for what # we call microarchitecture here. Any package ID is a hash convolution of # os, compiler, and then the microarch string. Collisions are so unlikely # they're impossible. if conanfile.settings.compiler == "gcc": cmdlist = ["gcc", "-march=native", "-Q", "--help=target"] regex = r"-march=\s+(?P<arch>[A-z0-9]+)" if conanfile.settings.compiler in ["clang", "apple-clang"]: cmdlist = ["clang", "-march=native", "-xc", "-", "-###"] regex = r"\"-target-cpu\"\\s+\"(?P<arch>[A-z0-9]+)\"" if cmdlist is None: return None result = sp.run(cmdlist, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True) result.check_returncode() matcher = re.compile(regex) for match in matcher.finditer(result.stdout): return match.group("arch") for match in matcher.finditer(result.stderr): return match.group("arch") return None class SerenityConanfile(ConanFile): name = "serenity" version = "1.3.0" description = "Serenity: A subsystem quantum chemistry program." 
topics = ("conan", "quantum-chemistry", "chemistry") url = "https://github.com/qcserenity/serenity" license = "LGPL-3.0" exports_sources = "CMakeLists.txt" generators = "cmake" exports = "serenity.patch" settings = "os", "compiler", "arch", "build_type" options = { "shared": [True, False], "tests": [True, False], "microarch": ["detect", "none"] } default_options = {"shared": False, "tests": False, "microarch": "none"} build_requires = "cmake/[>3.13.3]@scine/stable" requires = [ "zlib/[~=1.2.11]", "hdf5/[=1.12.0]@scine/stable", "eigen/[~=3.3.7]@conan/stable", "boost/[>1.58.0]@conan/stable" ] def source(self): self.run("git clone https://github.com/qcserenity/serenity") self.run("cd serenity && git checkout 1.3.0") shutil.move("serenity", "sources") tools.patch(base_path="sources", patch_file="serenity.patch") def _configure_cmake(self): cmake = CMake(self) cmake.definitions["SERENITY_ENABLE_TESTS"] = self.options.tests if self.options.microarch == "none": cmake.definitions["SERENITY_MARCH"] = "" else: cmake.definitions["SERENITY_MARCH"] = microarch(self) or "" cmake.definitions["HDF5_USE_STATIC_LIBRARIES"] = not self.options['hdf5'].shared cmake.definitions["HDF5_ROOT"] = self.deps_cpp_info["hdf5"].rootpath cmake.configure() return cmake def build(self): cmake = self._configure_cmake() cmake.build() def package(self): self.copy(pattern="sources/LICENSE", dst="licenses", keep_path=False) cmake = self._configure_cmake() cmake.install() def package_id(self): # Remove test option from package id computation delattr(self.info.options, "tests") # Overwrite microarch value in info with detected or make it empty if "microarch" in self.options: if self.options.get_safe("microarch") == "detect": self.info.options.microarch = microarch(self) or "" else: self.info.options.microarch = "" def package_info(self): pass
/xtb/conanfile.py
__copyright__ = """This code is licensed under the 3-clause BSD license. Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group. See LICENSE.txt for details. """ import os from conans import ConanFile, CMake, tools class XtbConanfile(ConanFile): name = "xtb" version = "6.3.2" description = "Semiempirical Extended Tight-Binding Program Package" topics = ("conan", "quantum-chemistry", "chemistry") url = "https://github.com/grimme-lab/xtb" homepage = "https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/xtb/xtb" license = "LGPL-3.0-only" exports = "portable-linalg.patch" exports_sources = "CMakeLists.txt" generators = "cmake" settings = "os", "compiler", "arch", "build_type" options = {"shared": [True, False], "fPIC": [True, False]} default_options = {"shared": False, "fPIC": True} requires = [ "cmake/[>=3.18.0]@scine/stable", "lapack/3.7.1@conan/stable" ] def config_options(self): if self.settings.os == "Windows": del self.options.fPIC def source(self): remote = "https://github.com/grimme-lab/xtb/archive/v{}.tar.gz" tools.get(remote.format(self.version)) extracted_dir = self.name + "-" + self.version os.rename(extracted_dir, "sources") tools.patch(base_path="sources", patch_file="portable-linalg.patch") def _configure_cmake(self): cmake = CMake(self) cmake.configure() return cmake def build(self): cmake = self._configure_cmake() cmake.build() def package(self): self.copy(pattern="sources/COPYING*", dst="licenses", keep_path=False) cmake = self._configure_cmake() cmake.install() def package_info(self): pass
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
chengyan1984/cdk-gui
refs/heads/master
{"/BaseUtil.py": ["/cookie_test.py", "/Util.py"], "/CDKCookieUtil.py": ["/cookie_test.py", "/BaseUtil.py"], "/MIUtil.py": ["/cookie_test.py", "/BaseUtil.py"], "/MideaCookieUtil.py": ["/cookie_test.py", "/BaseUtil.py"], "/SuningUtil.py": ["/cookie_test.py", "/BaseUtil.py"], "/TCSMCookieUtil.py": ["/cookie_test.py", "/BaseUtil.py"], "/login.py": ["/CDKUtil.py"], "/GreeUtil.py": ["/Util.py"], "/huadi_zb.py": ["/Util.py"], "/JDUtil.py": ["/BaseUtil.py"], "/MideaUtil.py": ["/BaseUtil.py"], "/test/http2.py": ["/BaseUtil.py"]}
└── ├── BaseUtil.py ├── CDKCookieUtil.py ├── CDKUtil.py ├── ChromeCookie.py ├── ConkaUtil.py ├── GreeUtil.py ├── JDUtil.py ├── MIUtil.py ├── MideaCookieUtil.py ├── MideaUtil.py ├── SuningUtil.py ├── TCSMCookieUtil.py ├── Util.py ├── aesgcm.py ├── asdfsd.py ├── chrome_cookies_old.py ├── cookie_test.py ├── huadi_zb.py ├── login.py ├── master.py ├── searchutil.py ├── test │ ├── http2.py │ └── test_re.py └── test_text.py
/BaseUtil.py
import re
from urllib.parse import urlparse
import json
import requests
from bs4 import BeautifulSoup
from datetime import date, timedelta, datetime
from Util import Util
from cookie_test import fetch_chrome_cookie


class BaseUtil(Util):
    """Common scraping helpers shared by the vendor-specific order utilities.

    Holds a requests session, default headers and the factory/admin
    identifiers that subclasses use when fetching orders from a vendor CRM
    and pushing them to the bjdomain endpoint.
    """

    def __init__(self, username, passwd, adminid='15870', factoryid='1', baseurl='https://crm.konka.com',
                 bjdomain='http://north.bangjia.me'):
        """Store credentials/endpoints and initialize the HTTP session."""
        parsed_uri = urlparse(baseurl)
        self.host = parsed_uri.netloc
        self.username = username
        self.passwd = passwd
        self.baseurl = baseurl
        self.adminid = adminid
        self.factoryid = factoryid
        self.bjdomain = bjdomain
        self.mainurl = self.baseurl + '/admin/page!main.action'
        self.searchurl = self.baseurl + '/afterservice/afterservice!api.action'
        self.session = requests.Session()
        self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
        # Canned result payloads returned by loadOrders() implementations
        self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}
        self.datafail = {'code': 0, 'msg': '抓单失败,请确认账号密码是否正确'}
        self.dataverify = {'code': 2, 'msg': '登录过期,请重新登录', 'element': ''}
        self.headers = {'Content-Type': 'application/json;charset=UTF-8', 'User-Agent': self.agent,
                        'Referer': self.baseurl, 'Upgrade-Insecure-Requests': '1', 'Host': self.host,
                        'Origin': self.baseurl, 'Accept-Encoding': 'gzip, deflate, br',
                        'Connection': 'keep-alive',
                        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                        'Accept': 'application/json, text/plain, */*'}
        self.initCookie()

    def getsoup(self, response):
        """Decode a response as UTF-8 and parse it into BeautifulSoup."""
        response.encoding = 'utf-8'
        return BeautifulSoup(response.text, features="lxml")

    def parseHtml(self, htmlstr):
        """Return the stripped text content of an HTML fragment."""
        bsObj = BeautifulSoup(htmlstr, features="lxml")
        if not bsObj:
            return ""
        return bsObj.text.strip()

    def getjson(self, response):
        """Best-effort JSON decode of a response; returns None on failure."""
        response.encoding = 'utf-8'
        try:
            result = json.loads(response.text)
        except Exception as e:
            print("getjson failed:{}".format(str(e)))
            result = None
        return result

    @staticmethod
    def merge(lst1, lst2, keys, isCover=False):
        """Merge two lists of dicts, deduplicating on the given key(s).

        :param keys: a single key or a list of keys whose values form the
            deduplication key
        :param isCover: when True later items overwrite earlier ones,
            otherwise the first occurrence wins
        """
        def generate_key(item):
            # isinstance instead of `type(keys) == list` (idiomatic, also
            # accepts list subclasses)
            if isinstance(keys, list):
                return "_".join(str(v) for k, v in item.items() if k in keys)
            return "_".join(str(v) for k, v in item.items() if k == keys)

        hash_map = {}
        for item in lst1 + lst2:
            if isCover:
                hash_map[generate_key(item)] = item
            else:
                hash_map.setdefault(generate_key(item), item)
        result = list(hash_map.values())
        return result if result else []

    def initCookie(self, cookies=None):
        """Hook for subclasses: prime session cookies."""
        pass

    def login(self, param=None):
        """Hook for subclasses: authenticate against the vendor CRM."""
        pass

    def loadOrders(self, param=None):
        """Hook for subclasses: fetch orders and push them to bjdomain."""
        pass

    @staticmethod
    def getCookie(domains=[], isExact=False):
        """Read cookies from the local Chrome profile for the given domains.

        NOTE: `domains` is only read here, never mutated, so the mutable
        default is harmless — but callers should still prefer passing an
        explicit list.
        """
        return fetch_chrome_cookie(domains, isExact=isExact)

    @staticmethod
    def getCookies(cookie):
        """Split a "k=v; k2=v2" cookie header string into a dict."""
        cookies = dict([l.split("=", 1) for l in cookie.split("; ")])
        return cookies

    @staticmethod
    def getDateBefore(day):
        """Return the date `day` days ago, formatted as YYYY-MM-DD."""
        return (date.today() - timedelta(days=day)).strftime("%Y-%m-%d")

    @staticmethod
    def clearKey(data, datakey, destkey='address'):
        """Strip the value of data[datakey] from the front of data[destkey].

        Used to remove the province/city/... prefix from a full address.
        Mutates and returns `data`.
        """
        if datakey in data and data[destkey] and data[destkey].strip().startswith(data[datakey].strip()):
            data[destkey] = data[destkey].replace(data[datakey], '', 1).strip()
        return data

    @staticmethod
    def clearAddress(orderinfo, destkey='address'):
        """Remove province/city/county/town prefixes from the address field."""
        if destkey not in orderinfo:
            return orderinfo
        orderinfo = BaseUtil.clearKey(orderinfo, "province", destkey)
        orderinfo = BaseUtil.clearKey(orderinfo, "city", destkey)
        orderinfo = BaseUtil.clearKey(orderinfo, "county", destkey)
        orderinfo = BaseUtil.clearKey(orderinfo, "town", destkey)
        return orderinfo

    @staticmethod
    def getTimeStr(string, isDefault=True):
        """Extract the first HH:MM-like time from a string.

        :param isDefault: when True, fall back to '00:00:00' if nothing
            valid is found; otherwise fall back to ''
        """
        defaultValue = '00:00:00' if isDefault else ''
        try:
            time_str = re.compile(r"\d{2}:\d{1,2}").findall(string)[0]
            result = time_str if BaseUtil.isTime(time_str) else defaultValue
            return result
        except IndexError:
            # No time-like substring present at all
            return defaultValue

    @staticmethod
    def isTime(time_str):
        """True if the string parses as HH:MM or HH:MM:SS."""
        return BaseUtil.isTimesecondstr(time_str) or BaseUtil.isTimestr(time_str)

    @staticmethod
    def isTimesecondstr(time_str):
        """True if the string parses as HH:MM:SS."""
        try:
            datetime.strptime(time_str, '%H:%M:%S')
            return True
        except ValueError:
            return False

    @staticmethod
    def isTimestr(time_str):
        """True if the string parses as HH:MM."""
        try:
            datetime.strptime(time_str, '%H:%M')
            return True
        except ValueError:
            return False

    @staticmethod
    def isDatetimestr(datetime_str):
        """True if the string parses as 'YYYY-MM-DD HH:MM:SS'."""
        try:
            datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
            return True
        except ValueError:
            return False

# print("getDateBefore(0)={}".format(BaseUtil.getDateBefore(0)))
/CDKCookieUtil.py
import datetime
import json
import re
import time
from urllib import parse
from urllib.parse import urlparse

import requests

from BaseUtil import BaseUtil
from cookie_test import fetch_chrome_cookie


class CDKCookieUtil(BaseUtil):
    """Order scraper for cdk.rrs.com that authenticates via cookies taken
    from the local Chrome profile rather than a username/password login.
    """

    def __init__(self, username='', passwd='', adminid='24', factoryid='18', baseurl='http://cdk.rrs.com',
                 bjdomain='http://yxgtest.bangjia.me'):
        super(CDKCookieUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)
        self.headers['Accept'] = "application/json, text/plain, */*"
        self.headers['Content-Type'] = 'application/json'
        # Reuse the cookies Chrome already holds for *.rrs.com
        self.cookie = fetch_chrome_cookie([{"domain": ".rrs.com"}], isExact=False)
        self.cookies = BaseUtil.getCookies(self.cookie)
        self.headers['Cookie'] = self.cookie
        self.azbaseurl = ''  # base URL of the CDK install module, needed for Haier install orders: http://cdkaz.rrs.com
        self.azhost = ''  # host of the CDK install module: cdkaz.rrs.com

    def loadOrders(self, param=None):
        """Fetch Haier install orders plus network ("wangdan") orders and
        push them to bjdomain; returns one of the canned result dicts."""
        # # Start loading work orders
        # self.headers['Accept'] = "*/*"
        # self.headers['Content-Type'] = 'application/json'
        # try:
        #     data = {"data": json.dumps(list(self.loadPageOrder()))}
        #     requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        # except:
        #     return self.dataverify
        # return self.datasuccess
        # print(self.cookies)
        if not self.islogin():
            return self.dataverify
        isSuccess = True
        haierRes = self.loadHaierOrder()  # fetch Haier install orders
        # print("loadHaierOrder result=", haierRes)
        isSuccess = isSuccess and haierRes['code'] == 1
        netorder = self.loadWangdan()  # order types: 1 repair, 2 install, 3 Honghe repair, 4 cleaning/maintenance
        if not netorder:
            return self.dataverify
        netRes = self.loadNetworkOrder(netorder, 5)  # fetch network orders - all
        isSuccess = isSuccess and netRes['code'] == 1
        # netRes = self.loadNetworkOrder(netorder, 2)  # fetch network orders - install
        # isSuccess = isSuccess and netRes['code'] == 1
        # netRes = self.loadNetworkOrder(netorder, 1)  # fetch network orders - repair
        # isSuccess = isSuccess and netRes['code'] == 1
        # netRes = self.loadNetworkOrder(netorder, 3)  # fetch network orders - Honghe repair
        # isSuccess = isSuccess and netRes['code'] == 1
        # netRes = self.loadNetworkOrder(netorder, 4)  # fetch network orders - cleaning/maintenance
        # isSuccess = isSuccess and netRes['code'] == 1
        return self.datasuccess if isSuccess else self.datafail

    def islogin(self):
        """Check the cookie session is still valid and run the Haier-module
        auth flow; stores the token and az* endpoints as a side effect."""
        url = self.baseurl + "/manager-web/index.do"
        if 'userCookie' in self.cookies:
            url += "?token=" + self.cookies['userCookie']
        header = self.headers.copy()
        header[
            'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
        # header['Referer'] = self.baseurl
        response = self.session.get(url, headers=header)
        soup = self.getsoup(response)
        # print(soup)
        haierSpan = soup.find('span', text=re.compile('海尔安装'))
        # print("+++++++++++++++++++++++++++++++getHaierUrl")
        # print(haierSpan)
        if not haierSpan:
            # The "Haier install" link only shows up for a logged-in session
            return False
        parsed_url = urlparse(haierSpan['href'])
        self.azhost = parsed_url.netloc
        self.azbaseurl = parsed_url.scheme + "://" + parsed_url.netloc
        params = dict(parse.parse_qsl(parsed_url.query))
        if 'token' not in params:
            return False
        token = params['token']
        self.cookies['token'] = token
        # Run the Haier work-order authentication flow
        param = json.dumps({"token": params['token'], "moduleCode": "04", "userId": ""})
        header = self.headers.copy()
        header['Host'] = self.azhost
        header['Origin'] = self.azbaseurl
        header['Referer'] = self.azbaseurl + "/pages/indexcdk?moduleCode=04&newTopWindow=true&token=" + token
        r0 = self.session.post(self.azbaseurl + "/api/system/authMenu/auth", data=param, headers=header)
        r = self.session.post(self.azbaseurl + "/api/system/authMenu/authMenuChanges", data=param, headers=header)
        # r2 = self.session.post(self.baseurl + "/manager-web/getCdkscIndexData.do", headers=header)
        return self.isSuccess(r0) and self.isSuccess(r)  # and self.isSuccess(r2)

    def isSuccess(self, r):
        """True when a JSON response carries a truthy 'success' field."""
        authresult = self.getjson(r)
        if not authresult or 'success' not in authresult or not authresult['success']:
            return False
        # if 'serviceCode' in authresult and authresult['serviceCode']:
        #     self.serviceCode = authresult['serviceCode']
        return True

    def loadWangdan(self):
        """Load the network-order ("wangdan") landing page and extract the
        per-order-type module links; returns False when not logged in."""
        url = self.baseurl + "/cdkwd/index2?moduleCode=02&token=" + self.cookies['token']
        header = self.headers
        del header['Content-Type']
        header[
            'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
        header['Referer'] = self.baseurl + "/manager-web/index.do?token=" + self.cookies['token']
        header['Upgrade-Insecure-Requests'] = "1"
        response = self.session.get(url, headers=header)
        soup = self.getsoup(response)
        # print(soup)
        haierSpan = soup.find('div', text=re.compile('网单全流程'))
        print("+++++++++++++++++++++++++++++++loadWangdan")
        print(haierSpan)
        if not haierSpan:
            return False
        netorder = {'0': url,
                    # '1': self.baseurl + soup.find('div', text=re.compile('维修单'))['href'],
                    # '2': self.baseurl + soup.find('div', text=re.compile('安装单'))['href'],
                    # '3': self.baseurl + soup.find('div', text=re.compile('鸿合维修单'))['href'],
                    # '4': self.baseurl + soup.find('div', text=re.compile('清洁保养'))['href']
                    '5': self.baseurl + soup.find('div', text=re.compile('网单全流程'))['href']
                    }
        # keys: 1 repair, 2 install, 3 Honghe repair, 4 cleaning/maintenance, 5 full process
        return netorder

    def loadNetworkOrder(self, netorder, ordertype=2):
        """Fetch one page of network orders and push them to bjdomain.

        :param netorder: link dict produced by loadWangdan()
        :param ordertype: 5 = all, 1 = repair, 2 = install,
            3 = Honghe repair, 4 = cleaning/maintenance
        """
        api_path = netorder[str(ordertype)]
        # print("***********************************loadNetworkOrder,url={}".format(apiPath))
        header = self.headers
        header['Referer'] = netorder['0']
        # Visit the listing page first so the session state matches a browser
        self.session.get(api_path, headers=header)
        header['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        header[
            'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'
        header['X-Requested-With'] = "XMLHttpRequest"
        header['Accept-Encoding'] = "gzip, deflate"
        header['Referer'] = api_path
        header['Upgrade-Insecure-Requests'] = '1'
        header['Cache-Control'] = 'max-age=0'
        apiPath = '/cdkwd/azdOrder/azdOrderList'
        if ordertype == 1:
            apiPath = '/cdkwd/repairOrder/repairOrderList'
        elif ordertype == 3:
            apiPath = '/cdkwd/wxRepairOrder/repairOrderList'
        elif ordertype == 4:
            apiPath = '/cdkwd/byOrder/byOrderList'
        elif ordertype == 5:
            apiPath = '/cdkwd/deliveryOrder/deliveryOrderList'
        today = datetime.date.today()  # today's date
        pageUrl = self.baseurl + apiPath
        # Query window: the last 26 days up to today
        pageUrl = pageUrl + "?orderDateBegin=" + (today - datetime.timedelta(days=26)).strftime(
            '%Y-%m-%d') + "&orderDateEnd=" + datetime.date.today().strftime('%Y-%m-%d')
        pageUrl += "&orderCode=&orderId=&consignee=&length=150&consigneeMobile=&deliveryDateBegin=&deliveryDateEnd=&branchCodeYw=&orderStatus=&carDriver=&carPhone=&province=&city=&regionCode=&consigneeAddr=&carNo=&oldOrder=&isYy=&serviceArea=&serviceCodeYw="
        # params = dict(parse.parse_qsl(parsed_url.query))
        # print("pageUrl={}".format(pageUrl))
        params = {}
        params['draw'] = "2" if ordertype == 2 else "1"  # 1 for repair, 2 for install
        params['order[0][column]'] = "2"
        params['order[0][dir]'] = "desc"
        params['start'] = 0
        params['length'] = 150
        orderRes = self.session.get(pageUrl, headers=header)
        orderRes.encoding = 'utf-8'
        # print("params=",params)
        # print("headers=",header)
        # print("loadNetworkOrder order result={}".format(orderRes.text))
        if orderRes.status_code != 200 or not orderRes.text or len(orderRes.text.strip()) <= 0:
            return self.datafail
        orderResult = self.getjson(orderRes)
        if 'recordsTotal' in orderResult and orderResult['recordsTotal'] > 0:
            try:
                order_list = list(self.load_wd_orders(orderResult))
                print(order_list)
            except Exception as e:
                error = self.datafail.copy()
                error['msg'] = str(e)
                return error
            # Push the collected orders to the bjdomain endpoint
            checkRes = requests.post(self.bjdomain + "/Api/Climborder/addorder",
                                     data={"data": json.dumps(order_list)})
            checkRes.encoding = 'utf-8'
            if checkRes and checkRes.status_code == 200:
                print("网单同步成功")
            return self.datasuccess
        return self.datasuccess

    def load_wd_orders(self, orderResult):
        # Load the network-order list: map each raw record to the bjdomain
        # order schema, skip already-known orders, then enrich via details
        for r in orderResult['data']:
            description = "原单号:{},工单方式:{},司机:{}|{},联系人:{}|{}".format(r['sourceSn'], r['installWayName'] or '',
                                                                    r['carDriver'] or '', r['carPhone'] or '',
                                                                    r['fhContact'] or '', r['fhMobile'] or '')
            curtime = int(time.time())
            # Prefer the reservation time, fall back to delivery date, then now
            r_time = r['reserveTime'] if r['reserveTime'] else r['deliveryDate'] or str(curtime)
            ordername = r['typeCodeName'] if "typeCodeName" in r and r['typeCodeName'] else ""
            order_info = {'factorynumber': r['orderId'], 'ordername': ordername, 'username': r['consignee'],
                          'mobile': r['consigneeMobile'], 'orderstatus': r['orderStatusName'],
                          'machinetype': r['add8'], 'province': r['province'], 'city': r['city'],
                          'county': r['region'], 'address': r['consigneeAddr'], 'description': r['add12'],
                          'ordertime': str(datetime.datetime.fromtimestamp(int(r['createdDate']) / 1000)),
                          'repairtime': str(datetime.datetime.fromtimestamp(int(r_time) / 1000)),
                          'buydate': str(datetime.datetime.fromtimestamp(int(r['accountDate']) / 1000)),
                          'machinebrand': '海尔', 'version': r['add5'], 'note': description,
                          'companyid': self.factoryid, 'adminid': self.adminid,
                          'originname': r['sourceCodeName'], 'branchCodeYw': r['branchCodeYw'],
                          'serviceCodeYw': r['serviceCodeYw']
                          }
            order_info = self.clearAddress(order_info)
            if not self.isNew(order_info, self.bjdomain, self.adminid):
                continue
            yield from self.load_wd_info(order_info)

    def load_wd_info(self, info):
        # Load network-order details: recover the unmasked phone number and
        # the machine brand/type/model/serial from the detail page
        info_url = self.baseurl + "/cdkwd/deliveryOrder/orderInfo?orderId={}&branchCode={}&serviceCode={}".format(
            info['factorynumber'], info['branchCodeYw'], info['serviceCodeYw'])
        res = self.session.get(info_url, headers=self.headers)
        soup = self.getsoup(res)
        # print("load_wd_info result=", soup)
        m = info['mobile']
        c = m.count('*')
        # print("mobile=", m, "* count=", c)
        # Build a regex from the masked number, replacing the '*' run with
        # the matching number of digit wildcards
        mobiles = re.findall(re.compile(r'[>]({})[<]'.format(m.replace("*" * c, "[0-9]{" + str(c) + "}"))), res.text)
        if mobiles and len(mobiles) > 0:
            mobile = mobiles[0]
            info['mobile'] = mobile.split('-')[0]
            info['description'] = "收货人手机:" + mobile
        machines = soup.find("tbody").find('tr').find_all('td')
        if machines and len(machines) > 5:
            info['machinebrand'] = machines[0].text.strip()
            info['machinetype'] = machines[1].text.strip()
            info['version'] = machines[5].text.strip().replace(info['machinebrand'], '').replace(info['machinetype'],
                                                                                                 "")
            info['sn'] = machines[4].text.strip()
        yield info

    def loadHaierOrder(self):
        """Fetch unassigned Haier install orders from the CDK install module
        and push them to bjdomain; returns one of the canned result dicts."""
        pageUrl = self.azbaseurl + '/api/businessData/serviceList/selectServiceDealList'
        # print("***********************************loadHaierOrder,pageUrl=" + pageUrl)
        params = {}
        today = datetime.date.today()  # today's date
        params['jobStatus'] = '1#3'  # only the "unassigned" states; empty means all (e.g. 1#3#4#5)
        params['regTimeStart'] = (today - datetime.timedelta(days=3)).strftime('%Y-%m-%d %H:%M:%S')
        params['regTimeEnd'] = (today + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')
        params['pageIndex'] = 1
        params['rows'] = 50
        params['token'] = self.cookies['token']
        header = self.headers.copy()
        header['Referer'] = 'http://cdkaz.rrs.com/pages/cdkinstall/serveprocess'
        params = json.dumps(params)
        header['Content-Length'] = str(len(params))
        header['Host'] = self.azhost
        header['Origin'] = self.azbaseurl
        # print("loadHaierOrder params:")
        # print("params=", params)
        # print("header=", header)
        # print("pageUrl=", pageUrl)
        orderRes = self.session.post(pageUrl, data=params, headers=header)
        # print(orderRes.text)
        orderResult = self.getjson(orderRes)
        if orderRes.status_code == 200 and 'success' in orderResult and orderResult['success'] and orderResult['data'] \
                and 'records' in orderResult['data'] and orderResult['data']['records']:
            data = orderResult['data']
            records = data['records']
            pageCount = data['pageCount']
            pageSize = data['pageSize']
            rowCount = data['rowCount']
            firstResult = data['firstResult']
            # print(len(records))
            print('pageCount=%s,pageSize=%s,rowCount=%s,firstResult=%s' % (pageCount, pageSize, rowCount, firstResult))
            order_list = []
            try:
                for record in records:
                    ordername = record['orderFlagcode'] if record['orderFlagcode'] else ""
                    order_info = {'factorynumber': record['woId'], 'ordername': ordername,
                                  'username': record['customerName'], 'mobile': record['customerPhone'],
                                  'orderstatus': '待派单', 'machinetype': record['productName'],
                                  'address': record['address'], 'ordertime': record['assignDate'],
                                  'repairtime': record['serviceDate'], 'description': record['reflectSituation'],
                                  'version': record['modelName'], 'sn': record['model'],
                                  'companyid': self.factoryid, 'machinebrand': '海尔',
                                  'originname': 'CDK', 'adminid': self.adminid}
                    order_list.append(order_info)
            except Exception as e:
                print(order_list)
                error = self.datafail.copy()
                error['msg'] = str(e)
                return error
            # Push the collected orders to the bjdomain endpoint
            checkRes = requests.post(self.bjdomain + "/Api/Climborder/addorder", data={"data": json.dumps(order_list)})
            checkRes.encoding = 'utf-8'
            if checkRes and checkRes.status_code == 200:
                print("海尔工单同步成功")
            return self.datasuccess
        return self.datasuccess


if __name__ == '__main__':
    util = CDKCookieUtil('66004185', 'Dw147259', adminid='24', factoryid='18')
    print(util.loadOrders())
/CDKUtil.py
import datetime
import json
import os
import re
import random
import sys
from urllib import parse
from urllib.parse import urlparse

import requests
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup

# from useragent import agents


class CDKUtil:
    """Scrapes Haier install orders from the RRS "CDK" portal.

    Flow: captcha -> login -> locate the Haier-install module on the main
    page -> authenticate against the install sub-site -> walk its menu to the
    order-list API -> push new orders to the Bangjia backend.
    """

    def __init__(self, username='', passwd='Dw147259', token=None):
        self.baseurl = "http://cdk.rrs.com"
        self.mainurl = 'http://cdk.rrs.com/manager-web/index.do'
        self.session = requests.Session()
        # self.agent = random.choice(agents)
        self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'
        # Random key that ties the captcha image to its verification request.
        self.guidStr = CDKUtil.guid()
        self.token = token
        self.orderurl = ''
        self.username = username
        self.passwd = passwd

    @staticmethod
    def guid():
        """Return a 32-char hex id (uuid4 without hyphens, 13th char forced to '4')."""
        import uuid
        s_uuid = str(uuid.uuid4())
        l_uuid = s_uuid.split('-')
        s_uuid = ''.join(l_uuid)
        s_uuid = s_uuid[:12] + "4" + s_uuid[13:]
        return s_uuid

    def generateCode(self):
        """Fetch a fresh captcha image (keyed by a new guid) and return it as a PIL Image."""
        self.guidStr = CDKUtil.guid()
        # Dynamically load the captcha image for this redis key.
        captchaUrl = self.baseurl + "/login/generateCode?redisKey=" + self.guidStr
        print("generateCode guidStr=%s,captchaUrl=%s" % (self.guidStr, captchaUrl))
        response = self.session.get(captchaUrl)
        return Image.open(BytesIO(response.content))

    # Verify the captcha the user typed, then continue with the login.
    def checkCode(self, code, name, passwd):
        """Submit the captcha answer; on success chain into login(), else return False."""
        self.username = name
        self.passwd = passwd
        params = {"redisKey": self.guidStr, "checkCode": code}
        headers = {'content-type': 'application/json; charset=utf-8', 'X-Requested-With': 'XMLHttpRequest',
                   'User-Agent': self.agent, 'Accept-Encoding': 'gzip, deflate',
                   'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',
                   'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8', 'Host': 'cdk.rrs.com'}
        checkRes = self.session.post(self.baseurl + "/login/checkCode", data=json.dumps(params), headers=headers)
        print('=========================checkCode')
        checkResult = json.loads(checkRes.text)
        # Captcha correct.
        if checkResult and checkResult['result'] == '1':
            print("=========================验证成功")
            codeFaultTimes = 0
            return self.login(code, name, passwd)
        else:
            # Caller should reload the captcha image and retry.
            return False

    def login(self, code, username, passwd):
        """Captcha passed — perform the actual login POST and continue into the main page."""
        params = {"loginname": username, "loginpwd": passwd,
                  "returnUrl": "http://cdk.rrs.com/manager-web/index.do", "checkCode": code}
        r = self.session.post(self.baseurl + "/login", data=params)
        r.encoding = 'utf-8'
        # Login succeeded: we are on the main page.
        if r.status_code == 200:
            mainhtml = BeautifulSoup(r.text, features="lxml")
            return self.getHaierUrl(mainhtml)
        # Redirected to Location — treated as failure here.
        elif r.status_code == 302:
            location = r.headers['Location']
            if location:
                return False
        return False

    def getHaierUrl(self, soap):
        """Locate the "海尔安装" (Haier install) entry on the main page and follow it."""
        haierSpan = soap.find('span', text=re.compile('海尔安装'))
        print("+++++++++++++++++++++++++++++++getHaierUrl")
        print(haierSpan)
        if not haierSpan:
            return False
        haierUrl = haierSpan['href']
        return self.loadHaier(haierUrl)

    # Load the Haier install module page.
    def loadHaier(self, url):
        """Fetch the Haier sub-site landing page, then authenticate against it."""
        session = requests.Session()
        print("loadHaier url=" + url)
        haierMain = session.get(url)
        if haierMain.status_code == 200:
            soap = BeautifulSoup(haierMain.text, features="lxml")
            soap.encoding = 'utf-8'
            # The page only bootstraps JS bundles; the real work is the auth call.
            return self.authAndgetMenu(url)
        else:
            return False

    # url looks like:
    # http://cdkaz.rrs.com/pages/cdkinstall/serveprocess?moduleCode=04&newTopWindow=true&token=...
    def authAndgetMenu(self, url):
        """Authenticate on the install sub-site, walk its menu tree to the
        "服务处理" (service handling) page, then load the order list."""
        auth = 'http://cdkaz.rrs.com//api/system/authMenu/auth'
        parsed_url = urlparse(url)
        print("========----------=============")
        print(parsed_url)
        haierBaseUrl = parsed_url.scheme + "://" + parsed_url.netloc
        pageUrl = haierBaseUrl + parsed_url.path
        params = dict(parse.parse_qsl(parsed_url.query))
        self.token = params['token']  # remember the token on the instance
        headers = {'content-type': 'application/json; charset=utf-8', 'X-Requested-With': 'XMLHttpRequest',
                   'User-Agent': self.agent, 'Accept-Encoding': 'gzip, deflate',
                   'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',
                   'Accept': 'application/json, text/plain, */*', 'Host': parsed_url.netloc,
                   'Origin': haierBaseUrl}
        checkRes = self.session.post(auth, data=json.dumps(params), headers=headers)
        checkRes.encoding = 'utf-8'
        authResult = json.loads(checkRes.text)
        # params carry {token=..., moduleCode=04, userId=''}
        if checkRes.status_code == 200 and authResult['success']:
            menuUrl = 'http://cdkaz.rrs.com//api/system/authMenu/authMenuChanges'
            menuRes = self.session.post(menuUrl, data=json.dumps(params), headers=headers)
            menuRes.encoding = 'utf-8'
            menuResult = json.loads(menuRes.text)
            if menuRes.status_code == 200 and menuResult['success']:
                # Menu tree is three levels deep; find the "服务处理" leaf.
                for data in menuResult['data']:
                    for children in data['children']:
                        for childitem in children['children']:
                            if childitem['text'] == '服务处理':
                                self.orderurl = haierBaseUrl + childitem['link'] + "?" + str(parse.urlencode(params))
                                self.updateUser(self.username, self.passwd, self.orderurl)
                                return self.loadHaierOrder()
        return False  # caller should log in again

    def loadHaierOrder(self):
        """Query the service-deal list (last 6 days, un-dispatched states) and
        push the orders to the Bangjia backend. Returns True on upload success."""
        print("loadHaierOrder url=" + self.orderurl)
        parsed_url = urlparse(self.orderurl)
        apipath = '/api/businessData/serviceList/selectServiceDealList'
        print("***********************************")
        haierBaseUrl = parsed_url.scheme + "://" + parsed_url.netloc
        pageUrl = haierBaseUrl + apipath
        params = dict(parse.parse_qsl(parsed_url.query))
        today = datetime.date.today()  # today's date
        # Only the un-dispatched states; empty means all states (1#3#4#5).
        params['jobStatus'] = '1#3'
        params['regTimeStart'] = (today - datetime.timedelta(days=6)).strftime('%Y-%m-%d %H:%M:%S')
        params['regTimeEnd'] = (today + datetime.timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')
        params['pageIndex'] = 1
        params['rows'] = 50
        headers = {'content-type': 'application/json', 'User-Agent': self.agent,
                   'Accept-Encoding': 'gzip, deflate',
                   'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive',
                   'Accept': 'application/json, text/plain, */*', 'Host': parsed_url.netloc,
                   'Origin': haierBaseUrl, 'Referer': self.orderurl}
        # Body is a raw JSON string, so Content-Length must be set by hand.
        params = json.dumps(params)
        headers['Content-Length'] = str(len(params))
        print("loadHaierOrder params:")
        orderRes = self.session.post(pageUrl, data=params, headers=headers)
        orderRes.encoding = 'utf-8'
        orderResult = json.loads(orderRes.text)
        if orderRes.status_code == 200 and orderResult['success'] and orderResult['data']:
            data = orderResult['data']
            records = data['records']
            pageCount = data['pageCount']
            pageSize = data['pageSize']
            rowCount = data['rowCount']
            firstResult = data['firstResult']
            print(len(records))
            print('pageCount=%s,pageSize=%s,rowCount=%s,firstResult=%s' % (pageCount, pageSize, rowCount, firstResult))
            new_datas = {}
            order_list = []
            for record in records:
                # NOTE(review): raises TypeError when orderFlagcode is None —
                # the CDKCookieUtil variant of this loop guards for that.
                ordername = "安装" if "安装" in record['orderFlagcode'] else "维修"
                order_info = {'factorynumber': record['woId'], 'ordername': ordername,
                              'username': record['customerName'], 'mobile': record['customerPhone'],
                              'orderstatus': '待派单', 'machinetype': record['productName'],
                              'address': record['address'], 'ordertime': record['assignDate'],
                              'repairtime': record['serviceDate'], 'description': record['reflectSituation'],
                              'version': record['modelName'], 'sn': record['model'],
                              'companyid': 18, 'machinebrand': '海尔', 'originname': 'CDK',
                              'adminid': '26073'}
                order_list.append(order_info)
            # Forward the batch to the Bangjia backend (hard-coded host here).
            checkRes = requests.post("http://north.bangjia.me/Api/Climborder/addorder",
                                     data={"data": json.dumps(order_list)})
            checkRes.encoding = 'utf-8'
            if checkRes and checkRes.status_code == 200:
                print("同步成功")
                return True
        return False

    def updateUser(self, name, passwd, orderurl):
        """Persist the working credentials/token/order URL to file/user.txt
        (next to the script) so later runs can skip the login dance."""
        userinfo = {"username": name, "passwd": passwd, "token": self.token, 'islogin': True,
                    'orderurl': orderurl}
        userfile = os.path.join(os.path.split(os.path.abspath(sys.argv[0]))[0], "file", "user.txt")
        with open(userfile, 'w') as f:
            jsObj = json.dumps(userinfo)
            f.write(jsObj)


if __name__ == '__main__':
    # util = JDUtil('24', factoryid='19')
    util = CDKUtil()
/ChromeCookie.py
import os
import json
import base64
import win32crypt
from Crypto.Cipher import AES
import sqlite3

'''
Layout of Chrome's cookie table (PRAGMA table_info([cookies])):
[(0, 'creation_utc', 'INTEGER', 1, None, 0), (1, 'host_key', 'TEXT', 1, None, 0), (2, 'name', 'TEXT', 1, None, 0),
 (3, 'value', 'TEXT', 1, None, 0), (4, 'path', 'TEXT', 1, None, 0), (5, 'expires_utc', 'INTEGER', 1, None, 0),
 (6, 'is_secure', 'INTEGER', 1, None, 0), (7, 'is_httponly', 'INTEGER', 1, None, 0),
 (8, 'last_access_utc', 'INTEGER', 1, None, 0), (9, 'has_expires', 'INTEGER', 1, '1', 0),
 (10, 'is_persistent', 'INTEGER', 1, '1', 0), (11, 'priority', 'INTEGER', 1, '1', 0),
 (12, 'encrypted_value', 'BLOB', 0, "''", 0), (13, 'samesite', 'INTEGER', 1, '-1', 0),
 (14, 'source_scheme', 'INTEGER', 1, '0', 0)]
'''

sql = """ SELECT host_key, name, path,encrypted_value as value FROM cookies """


def get_decrypted_key():
    """Return Chrome's AES cookie key, unwrapped via the Windows DPAPI.

    Reads the base64 'encrypted_key' from Chrome's Local State file, strips the
    5-byte 'DPAPI' prefix, and decrypts with CryptUnprotectData.
    """
    path = r'%LocalAppData%\Google\Chrome\User Data\Local State'
    path = os.path.expandvars(path)
    with open(path, 'r', encoding='utf8') as file:
        encrypted_key = json.loads(file.read())['os_crypt']['encrypted_key']
    encrypted_key = base64.b64decode(encrypted_key)  # Base64 decoding
    encrypted_key = encrypted_key[5:]  # Remove the 'DPAPI' prefix
    decrypted_key = win32crypt.CryptUnprotectData(encrypted_key, None, None, None, 0)[1]  # Decrypt key
    return decrypted_key


def get_chrome_cookie():
    """Return (host_key, name, path, encrypted_value) rows from Chrome's cookie DB."""
    cookies_path = os.environ['HOMEPATH'] + r'\AppData\Local\Google\Chrome\User Data\Default\Cookies'
    cookies_path = os.path.join(os.environ['LOCALAPPDATA'], os.environ['HOMEPATH'], cookies_path)
    con = sqlite3.connect(cookies_path)
    res = con.execute(sql).fetchall()
    con.close()
    return res


def decrypt_chrome_cookie(decrypted_key, data):
    """Decrypt one 'v10'-format (AES-GCM) cookie blob; return b'' equivalents as "".

    Blob layout: b'v10' | 12-byte nonce | ciphertext | 16-byte GCM tag.
    NOTE(review): the tag is deliberately not verified here (decrypt, not
    decrypt_and_verify), so tampered blobs decrypt silently.
    """
    if data[:3] == b'v10':
        nonce = data[3:3 + 12]
        ciphertext = data[3 + 12:-16]
        tag = data[-16:]
        cipher = AES.new(decrypted_key, AES.MODE_GCM, nonce=nonce)
        # plaintext = cipher.decrypt_and_verify(ciphertext, tag)  # the verified variant
        plaintext = cipher.decrypt(ciphertext)
        return plaintext
    else:
        # Pre-v10 cookies are DPAPI-encrypted; not handled here.
        return ""


def fetch_chrome_cookies(domain=''):
    """Return decrypted cookies whose host contains *domain*, as dicts with
    'host'/'name'/'path'/'value' keys."""
    rows = get_chrome_cookie()
    # Hoisted out of the loop: one DPAPI round-trip total instead of one per cookie.
    decrypted_key = get_decrypted_key()
    cookie_items = []
    for row in rows:
        if domain in row[0]:
            plaintext = decrypt_chrome_cookie(decrypted_key, row[3])
            plaintext = str(plaintext, encoding="utf-8")
            cookie_items.append({"host": row[0], "name": row[1], "path": row[2], "value": plaintext})
    return cookie_items


def fetch_chrome_cookie(domain=''):
    """Return the matching cookies joined as a single 'name=value; ...' header string."""
    cookie_list = fetch_chrome_cookies(domain)
    cookieValue = ''
    for item in cookie_list:
        cookieValue += item['name'] + '=' + item['value'] + '; '
    # NOTE(review): [:-1] trims only the trailing space, leaving a trailing ';'
    # — kept as-is since downstream consumers may expect it; [:-2] would drop both.
    return cookieValue[:-1]


if __name__ == '__main__':
    print(fetch_chrome_cookie('xiaomi.com'))
/ConkaUtil.py
import json
from urllib.parse import urlparse

import requests


class ConkaUtil:
    """Scrapes pending service orders from the Konka CRM and forwards them to
    the Bangjia backend.

    Flow: authenticate -> a few session-warming GETs (dept info, login, org
    list) -> query order pages per status -> POST the batch to bjdomain.
    """

    def __init__(self, username, passwd, adminid='15870', factoryid='1', baseurl='https://crm.konka.com',
                 bjdomain='http://north.bangjia.me'):
        parsed_uri = urlparse(baseurl)
        self.host = parsed_uri.netloc
        self.username = username
        self.passwd = passwd
        self.baseurl = baseurl
        self.adminid = adminid
        self.factoryid = factoryid
        self.bjdomain = bjdomain
        self.mainurl = self.baseurl + '/admin/page!main.action'
        self.searchurl = self.baseurl + '/afterservice/afterservice!api.action'
        self.session = requests.Session()
        self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                     'Chrome/81.0.4044.113 Safari/537.36'
        # Canonical result payloads returned to the caller.
        self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}
        self.datafail = {'code': 0, 'msg': '抓单失败,请确认账号密码是否正确'}
        self.headers = {'Content-Type': 'application/json;charset=UTF-8', 'User-Agent': self.agent,
                        'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Origin': self.baseurl,
                        'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',
                        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
                        'Accept': 'application/json, text/plain, */*'}

    def loadMain(self):
        """Authenticate and run the whole scrape. Returns datasuccess/datafail."""
        loginurl = self.baseurl + "/services/organization/api/authenticate"
        data = {"username": self.username, "password": self.passwd, "rememberMe": True}
        self.headers['Referer'] = self.baseurl
        response = self.session.post(loginurl, headers=self.headers, data=json.dumps(data))
        response.encoding = 'utf-8'
        # Fix: a failed login carries no Authorization header — previously this
        # raised KeyError; now it reports a clean failure.
        author = response.headers.get('Authorization')
        if not author:
            return self.datafail
        self.headers['Authorization'] = author
        return self.getUserInfo()

    def getUserInfo(self):
        """Warm the session with the current-dept-info endpoint, then continue."""
        loginurl = self.baseurl + "/services/organization/api/current/dept/info"
        self.headers['Referer'] = self.baseurl
        response = self.session.get(loginurl, headers=self.headers)
        response.encoding = 'utf-8'
        return self.login()

    def login(self):
        """Hit the ourmUser login endpoint (session side effect only), then continue."""
        loginurl = self.baseurl + "/services/organization/api/ourmUser/login"
        self.headers['Referer'] = self.baseurl
        response = self.session.get(loginurl, headers=self.headers)
        response.encoding = 'utf-8'
        return self.getOrgInfo()

    def getOrgInfo(self):
        """Fetch the org list, then pull order pages per status and upload them."""
        loginurl = self.baseurl + "/services/organization/api/ourmUser/list"
        self.headers['Referer'] = self.baseurl
        response = self.session.get(loginurl, headers=self.headers)
        response.encoding = 'utf-8'
        params = [
            # {"betweenMap": {}, "dto": {"status": "DISTRIBUTING"}, "extMap": {}, "searchMap": {}},
            {"dto": {"status": "ACCEPTED"}, "pageIndex": 1, "pageSize": 50},
            {"dto": {"status": "RESERVATION"}, "pageIndex": 1, "pageSize": 50}]
        orderlist = []
        for param in params:
            orders = self.loadOrders(param)
            if orders and len(orders) > 0:
                orderlist += orders
        print("orderlist count={} orderlist={}".format(len(orderlist), orderlist))
        try:
            data = {"data": json.dumps(orderlist)}
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except Exception as e:
            print("addorder failed:", e)
            return self.datafail
        return self.datasuccess

    def loadOrders(self, param=None):
        """Query one page of orders for the status in *param*; return parsed list.

        Status values: RESERVATION = to confirm, ACCEPTED = to schedule,
        DISTRIBUTING = to accept, VISIT = to complete.
        """
        orderurl = self.baseurl + "/services/distributeproce/api/repair/acl/_search/page"
        self.headers['Request-Source'] = 'PC'
        self.headers['Sec-Fetch-Dest'] = 'empty'
        response = self.session.post(orderurl, data=json.dumps(param), headers=self.headers)
        response.encoding = 'utf-8'
        datas = json.loads(response.text)
        # Fix: .get avoids KeyError when the CRM returns an error payload
        # without a 'status' field.
        if datas.get('status') == 200:
            try:
                return self.parseOrders(datas)
            except Exception as e:
                print("addorder failed:", e)
        return []

    def parseOrders(self, datas):
        """Map CRM order records onto the Bangjia order dict format."""
        order_list = []
        for order_key in datas['data']['content']:
            # Best available appointment time: reservationDate when no first
            # attempt was made; otherwise the success time if present, else the
            # first-attempt time. (Rewritten from an opaque chained ternary.)
            if not order_key['reservationFirstTime']:
                repairtime = order_key['reservationDate']
            elif order_key['reservationSuccessTime']:
                repairtime = order_key['reservationSuccessTime']
            else:
                repairtime = order_key['reservationFirstTime']
            if repairtime:
                repairtime = repairtime.replace("T", ' ')
            orderno = order_key['repairSubOrderNum'] if order_key['repairSubOrderNum'] else order_key['reportNum']
            order_info = {'factorynumber': orderno, 'ordername': order_key['serviceTypeName'],
                          'username': order_key['purchaserName'], 'mobile': order_key['purchaserPhone'],
                          'orderstatus': order_key['statusName'], 'originname': '康佳系统',
                          'mastername': order_key['repairAclName'], 'machinetype': order_key['seriesName'],
                          'machinebrand': '康佳', 'sn': '', 'companyid': self.factoryid,
                          'adminid': self.adminid, 'address': str(order_key['purchaserReportAddress']),
                          'province': order_key['provinceName'], 'city': order_key['cityName'],
                          'county': order_key['regionName'], 'town': order_key['countyName'],
                          'ordertime': order_key['createdDate'], 'repairtime': repairtime,
                          'note': str(order_key['brandName']) + str(order_key['serviceNatureName']),
                          'description': order_key['userFaultDesc'],
                          }
            order_list.append(order_info)
        return order_list


if __name__ == '__main__':
    util = ConkaUtil('K608069', 'Crm@20200401', adminid='15870', factoryid='1')
    # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')
    # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')
    print(util.loadMain())
/GreeUtil.py
import json
from datetime import date, timedelta
from urllib.parse import urlparse, urlencode, unquote

import requests

from Util import Util


class GreeUtil(Util):
    """Scrapes commercial-AC install work orders from the Gree dispatch portal
    and forwards new ones to the Bangjia backend.

    Relies on helpers from the Util base class: getsoup, get_selected,
    checkBjRes, getAccount.
    """

    def __init__(self, username, passwd, adminid='15870', factoryid='1', baseurl='http://116.6.118.169:7909',
                 bjdomain='http://fatest.bangjia.me'):
        parsed_uri = urlparse(baseurl)
        self.host = parsed_uri.netloc
        self.username = username
        self.passwd = passwd
        self.baseurl = baseurl
        self.adminid = adminid
        self.factoryid = factoryid
        self.bjdomain = bjdomain
        self.loginurl = self.baseurl + "/hjzx/loginAction_login"
        self.mainurl = self.loginurl
        self.searchurl = self.baseurl + '/hjzx/afterservice/afterservice!api.action'
        self.session = requests.Session()
        self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                     'Chrome/81.0.4044.113 Safari/537.36'
        # Canonical result payloads returned to the caller.
        self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}
        self.datafail = {'code': 0, 'msg': '抓单失败,请确认账号密码是否正确'}
        self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': self.agent,
                        'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Referer': self.baseurl,
                        'Origin': parsed_uri.scheme + "://" + parsed_uri.netloc,
                        'Accept-Encoding': 'gzip, deflate', 'Connection': 'keep-alive',
                        'Accept-Language': 'zh-CN,zh;q=0.9',
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,'
                                  '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'}

    def isLogin(self):
        """Return True when the session is already authenticated (the login URL
        serves the HTML-escaped main-page title)."""
        response = self.session.get(self.loginurl, headers=self.headers)
        response.encoding = 'utf-8'
        return "新派工系统--&gt;主界面" in response.text

    def login(self):
        """POST the credentials; success is detected via the main-page title."""
        data = {"usid": self.username, "pswd": self.passwd, "loginflag": "loginflag"}
        response = self.session.post(self.loginurl, headers=self.headers, data=urlencode(data))
        response.encoding = 'utf-8'
        if response.status_code == 200:
            return "新派工系统--&gt;主界面" in response.text
        return False

    def loadMain(self):
        """Log in if needed, scrape the install-order search page, and upload
        the results. Returns datasuccess/datafail."""
        if not self.isLogin() and not self.login():
            return self.datafail
        headers = self.headers.copy()
        headers['Referer'] = self.baseurl + '/hjzx/menu.jsp'
        # Load the install work-order search page.
        url = self.baseurl + "/hjzx/az/doListLcLsAz?otype=az&xsorsh=1&cd=pgcx"
        response = self.session.get(url, headers=headers)
        if response.status_code != 200:
            return self.datafail
        try:
            data = {"data": json.dumps(list(self.search(url)))}
            result = requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except Exception as e:
            print("addorder failed:", e)
            return self.datafail
        return self.datasuccess

    def search(self, url, page=1, totalcount=0, pagesize=50):
        """Generator: POST the search form for today's orders and yield parsed rows.

        NOTE(review): isall is hard-coded True, so the recursive pagination
        branch below is currently dead — only the first page is fetched.
        """
        headers = self.headers.copy()
        headers['Referer'] = url
        today = date.today()
        data = {"otype": "az", "xsorsh": "1", "cd": "pgcx",
                "s_azAssign.s_spid": "102",  # commercial air-conditioning
                "s_azAssign.s_cjdt_from": (today).strftime('%Y-%m-%d %H:%M:%S'),
                "s_azAssign.s_cjdt_to": (today + timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S'),
                "isFirstPage": "true" if page == 1 else "false",
                "paged": str(page)
                }
        response = self.session.post(self.baseurl + "/hjzx/az/doListLcLsAz", headers=headers, data=urlencode(data))
        bsObj = self.getsoup(response)
        totalcount = int(bsObj.find("span", {"id": "totalRecord"}).text.strip())
        print("search totalcount={}".format(totalcount))
        # isall = (page + 1) * pagesize >= totalcount
        isall = True
        tbody = bsObj.find("table", {"id": "tbody"}).find("tbody")
        if isall:
            yield from self.parseOrders(tbody.find_all("tr"))
        else:
            yield from self.parseOrders(tbody.find_all("tr"))
            yield from self.search(url, page + 1, totalcount, pagesize)

    def parseOrders(self, trlist):
        """Generator: turn result-table rows into order dicts, enriching each
        new order with its detail page."""
        for tr in trlist:
            tablecolumns = tr.find_all("td")
            if tr and len(tablecolumns) > 2:
                data = self.parseorder(tablecolumns)
                if data:
                    detailUrl = self.baseurl + "/hjzx/az/" + tablecolumns[0].find("a")['href']
                    data = self.orderdetail(data, detailUrl)
                    yield data

    def parseorder(self, tablecolumns):
        """Map one result row's cells onto the Bangjia order dict; return None
        for already-known orders or on any parse error."""
        try:
            data = {}
            data['factorynumber'] = tablecolumns[2].text.strip()
            data['username'] = tablecolumns[4].text.strip()
            data['mobile'] = tablecolumns[5].text.strip()
            data['address'] = tablecolumns[6].text.strip()
            data['createname'] = tablecolumns[8].text.strip()
            data['ordertime'] = tablecolumns[9].text.strip()  # creation time
            data['companyid'] = self.factoryid
            data['machinebrand'] = "格力"
            data['machinetype'] = "商用空调"
            data['orgname'] = tablecolumns[10].text.strip()
            data['note'] = tablecolumns[12].text.strip()
            data['adminid'] = self.adminid
            data['description'] = "当前处理网点:{},处理结果跟踪:{},备注:{}".format(
                tablecolumns[10].text.strip(), tablecolumns[11].text.strip(),
                tablecolumns[12].text.strip())  # detailed description
            return data if self.isNew(data) else None
        except Exception as e:
            print("parseorder exception", e)
            return None

    def isNew(self, data):
        """Ask the Bangjia backend whether this order number is unseen."""
        res = requests.post(self.bjdomain + "/Api/Climborder/checkexist",
                            data={"orderno": data['factorynumber'], 'adminid': self.adminid})
        return self.checkBjRes(res)

    def orderdetail(self, data, detailUrl):
        """Enrich *data* from the order's detail page (model, dates, region,
        status). Returns data unchanged when the page fails to load."""
        headers = self.headers.copy()
        headers['Referer'] = self.baseurl + "/hjzx/az/doListLcLsAz"
        response = self.session.get(detailUrl, headers=headers)
        response.encoding = 'utf-8'
        if response.status_code != 200:
            return data
        bsObj = self.getsoup(response)
        # Technician name/phone are not available on this page.
        data['machineversion'] = str(bsObj.find("input", {"id": "jxid0"})["value"])
        data['buydate'] = str(bsObj.find("input", {"id": "gmrq"})["value"])
        data['repairtime'] = str(bsObj.find("input", {"id": "yyazsj"})["value"])  # scheduled install time
        data['orderstatus'] = bsObj.find("span", {"id": "dqpgjd"}).text.strip()
        data['province'] = self.get_selected(bsObj.find("select", {"id": "sfen"}))
        data['city'] = self.get_selected(bsObj.find("select", {"id": "cshi"}))
        data['county'] = self.get_selected(bsObj.find("select", {"id": "xian"}))
        data['town'] = self.get_selected(bsObj.find("select", {"id": "jied"}))
        data['address'] = str(bsObj.find("input", {"id": "dizi"})["value"])
        data['originname'] = self.get_selected(bsObj.find("select", {"id": "xslx"}))  # sales type used as order origin
        return data

    def logout(self):
        """End the portal session."""
        url = self.baseurl + "/hjzx/logout.jsp"
        self.headers['Referer'] = self.baseurl + '/hjzx/loginAction_login'
        self.session.get(url, headers=self.headers)


if __name__ == '__main__':
    bjdomain = 'http://zjgl.bangjia.me'
    account = Util.getAccount(bjdomain)
    # util = GreeUtil('S91898010070', 'S91898010070', adminid='24', factoryid='1')
    # print("loadMain result = {}".format(util.loadMain()))
    # util.logout()
    if account and 'loginname' in account and 'loginpwd' in account and 'adminid' in account and 'loginurl' in account:
        util = GreeUtil(account['loginname'], account['loginpwd'], adminid=account['adminid'], factoryid="10002",
                        baseurl=unquote(account['loginurl']), bjdomain=bjdomain)
        print("gree loadMain result = {}".format(util.loadMain()))
        util.logout()
/JDUtil.py
import json
import os
import re
import sys
import time

import requests
from hyper.tls import init_context

from BaseUtil import BaseUtil
from hyper import HTTPConnection, HTTP20Connection

# JD service-order business types, keyed by stringified businessType.
businessTypes = {"1": "上门安装", "2": "送货服务", "3": "提货送装", "4": "拆卸包装", "5": "退货服务"}
# JD order status names, keyed by stringified status code.
statusTypes = {"1": "新订单", "2": "自动分配失败", "3": "已分配", "4": "申请改派", "5": "已接收", "6": "已预约",
               "7": "已派工", "8": "上门完成", "12": "确认完成", "13": "取消服务", "14": "确认取消服务",
               "15": "客户取消"}


class JDUtil(BaseUtil):
    """Scrapes pending install/after-sales orders from JD's service portal
    (jdfw.jd.com) plus POP orders from opn.jd.com (HTTP/2 via hyper), and
    forwards them to the Bangjia backend.

    Authentication piggy-backs on the local Chrome login: cookies for .jd.com
    are lifted via BaseUtil.getCookie.
    """

    def __init__(self, username='', passwd='', adminid='24', factoryid='19', baseurl='http://jdfw.jd.com',
                 bjdomain='http://yxgtest.bangjia.me'):
        super(JDUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)
        self.mainurl = self.baseurl + '/admin/page!main.action'
        self.searchurl = self.baseurl + '/receipt/query.json'
        self.popurl = "https://opn.jd.com/bill/query.json"
        # Reuse the browser's JD session cookies instead of logging in.
        self.cookie = BaseUtil.getCookie([{"domain": ".jd.com"}])
        self.cookies = BaseUtil.getCookies(self.cookie)
        self.headers['Cookie'] = self.cookie
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded'

    def loadMain(self):
        """Entry point: fetch the operator's profile, then chain into the scrape."""
        self.headers['Referer'] = self.baseurl + '/receipt/receiptDashboardIndex?homePageDistinguish=notAppointed'
        self.headers['Accept'] = '*/*'
        response = self.session.post(self.baseurl + "/common/inforLinkage/getPerson", headers=self.headers)
        response.encoding = 'utf-8'
        print("loadMain result:{}".format(response.text))
        if response.status_code == 200:
            return self.getOrgan(json.loads(response.text))
        return self.datafail

    def getOrgan(self, datas):
        """Resolve the warehouse/organization record and merge it into *datas*."""
        response = self.session.post(self.baseurl + "/wareset/getImBaseLasWare", headers=self.headers,
                                     data={"lasWareCode": datas['wareHouseNo']})
        response.encoding = 'utf-8'
        if response.status_code == 200:
            return self.loadMains(dict(datas, **(json.loads(response.text)[0])))
        return self.datafail

    def uploadOrders(self, datas):
        """POST a batch of order dicts to the Bangjia backend."""
        try:
            data = {"data": json.dumps(datas)}
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except Exception as e:
            print("addorder failed:", e)
            return self.datafail
        return self.datasuccess

    def mergeData(self, result, orders):
        """Append *orders* to *result* unless it is an error payload (has 'code')."""
        if orders and "code" not in orders:
            result += orders
        return result

    def loadMains(self, datas):
        """Pull all four service types (0/1 portal, 3/4 POP) in two batches,
        uploading each batch; short sleeps space out the requests."""
        result = []
        result = self.mergeData(result, self.loadPageOrders(datas, 0))
        result = self.mergeData(result, self.loadPageOrders(datas, 1))
        self.uploadOrders(result)
        time.sleep(1)
        result = []
        result = self.mergeData(result, self.loadPageOrders(datas, 3))
        time.sleep(1)
        result = self.mergeData(result, self.loadPageOrders(datas, 4))
        return self.uploadOrders(result)

    def ispop(self, serviceType):
        """True for the POP marketplace order types (3 and 4)."""
        return serviceType == 3 or serviceType == 4

    def loadPopOrder(self, data, serviceType):
        """Fetch one page of POP orders over HTTP/2 (opn.jd.com requires h2,
        hence hyper + the :authority/:method/:path/:scheme pseudo-headers)."""
        result = ""
        for item in data:
            result += item + "=" + data[item] + "&"
        result = result[:-1]
        # Resolve the bundled CA file relative to the script location.
        realpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        print("realpath>>>>", realpath)
        cafile = os.path.join(realpath, "resource", 'pem', "certs.pem")
        print("cert_loc cafile>>>", cafile)
        conn = HTTP20Connection(host='opn.jd.com', port=443, ssl_context=init_context(cafile))
        headers = self.headers.copy()
        headers['Referer'] = "https://opn.jd.com/bill/search?billStatus=5"
        headers['Host'] = "opn.jd.com"
        headers['Origin'] = "https://opn.jd.com"
        headers[':authority'] = 'opn.jd.com'
        headers[':method'] = 'POST'
        headers[':path'] = '/bill/query.json'
        headers[':scheme'] = 'https'
        response = conn.request(method='POST', url=self.popurl, body=result, headers=headers)
        resp = conn.get_response(response)
        if resp.status != 200:
            print("请求{}失败,返回:{},请使用谷歌浏览器重新登录京东系统".format(response.url, response.text))
            return self.dataverify
        res = resp.read()
        return list(self.parseOrders(json.loads(res), serviceType))

    def loadPageOrders(self, datas, serviceType):
        """Fetch all orders of one serviceType.

        serviceType: 0 = install/maintenance orders, 1 = after-sales orders,
        3 = POP service orders, 4 = POP furniture service orders.
        """
        data = {
            "sort": "returnTime" if not self.ispop(serviceType) else "billId",
            "order": "desc",
            "sortKind": "4",
            "page": "1",
            "rows": "500",
            "reservationStatus": "",  # 3 = not yet reserved; empty = all states
        }
        if self.ispop(serviceType):
            data['isAppliance'] = '1' if serviceType == 3 else '0'
            data['billStatuses'] = '5'
            data['isEgBuy'] = '0'
            data['outletsNo'] = str(datas['infoLink'])
            return self.loadPopOrder(data, serviceType)
        else:
            data['serviceType'] = str(serviceType)
            data['fastDealNum'] = '5'  # 5 = awaiting reservation, 7 = awaiting feedback, 0 = all states
            data['esSwitch'] = '1'
            data['subCompanyId'] = str(datas['orgNo'])
            data['wareInfoId'] = str(datas['lasWareRelation'])
            data['outletsId'] = str(datas['infoLink'])
        result = ""
        for item in data:
            result += item + "=" + data[item] + "&"
        # The portal expects every (mostly empty) filter field to be present.
        result = result + "freeinstall=&startStatus=&endStatus=&timeout=&todayOtherReservationConditionName=&productBrand=&productType1=&productType2=&productType3=&orderId=&bizOrderId=&ordernoGroup=&customerName=&customerPhone=&serviceStreet=&wareId=&productName=&orderStatus=&orderStatusGroup=&createOrderTimeBegin=&createOrderTimeEnd=&reservationDateBegin=&reservationDateEnd=&firstReservationTimeBegin=&firstReservationTimeEnd=&changedReservationDateBegin=&changedReservationDateEnd=&feedbackStatus=&orderOrderStatus=&expectAtHomeDateBegin=&expectAtHomeDateEnd=&atHomeFinishDateBegin=&atHomeFinishDateEnd=&deliveryDateStart=&deliveryDateEnd=&homePageDistinguish=&fastDealNumByColor=&reportLessFlag=&superExperienceStore=&sourceOrderIdGroup=&sellerId=&sellerName=&eclpBusinessNo=&isFast="
        # Re-split the query string into a dict for requests' form encoding.
        params = {}
        datas = result.split("&")
        for data in datas:
            content = data.split("=")
            if len(content) > 1:
                params[content[0]] = content[1]
        self.headers['X-Requested-With'] = 'XMLHttpRequest'
        self.headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
        self.headers['Referer'] = self.baseurl + '/receipt/receiptDashboardIndex?homePageDistinguish=notAppointed' \
                                                 '&serviceType=' + str(serviceType)
        url = self.searchurl if not self.ispop(serviceType) else self.popurl
        response = self.session.post(url, headers=self.headers, data=params)
        response.encoding = 'utf-8'
        # A redirect into an error page means the browser cookies have expired.
        if response.status_code != 200 or "error" in response.url:
            print("请求{}失败,返回:{},请使用谷歌浏览器重新登录京东系统".format(response.url, response.text))
            return self.dataverify
        return list(self.parseOrders(self.getjson(response), serviceType))

    def parseOrders(self, datas, serviceType):
        """Generator over every order row in a query result payload."""
        if 'total' not in datas:
            return []
        total_num = datas['total']
        print("total count:{}".format(total_num))
        for data in datas['rows']:
            yield from self.parseOrder(data, serviceType)

    def getordername(self, data, serviceType):
        """Resolve the human-readable order name for POP vs portal records."""
        if self.ispop(serviceType) and 'businessType' in data and data['businessType']:
            index = str(int(data['businessType']))
            return businessTypes[index] if index in businessTypes else ''
        elif not self.ispop(serviceType) and 'reservationServiceTypeName' in data:
            return data['reservationServiceTypeName'] if data['reservationServiceTypeName'] else ''

    def parseOrder(self, data, serviceType):
        """Generator yielding one normalized Bangjia order dict for *data*.

        POP and portal payloads use different key names throughout, hence the
        pervasive key-fallback expressions. Epoch-ms timestamps are converted
        to local '%Y-%m-%d %H:%M:%S' strings.
        """
        # reservationServiceTypeName: e.g. 安装; createOrderTime: e.g. 1588123851000 (epoch ms)
        mobile = str(data['customerPhone']) if 'customerPhone' in data else ''
        address = str(data['serviceStreet']) if 'serviceStreet' in data else data['customerAddress']
        address = address.replace(",", "").replace(",", "") if address else ''
        # Strip a full-width-parenthesized suffix from the brand name.
        brand = re.sub(r'([^()]*)', '', data['productBrandName'])
        createTimeKey = "createOrderTime" if 'createOrderTime' in data else "createTime"
        orderid = "orderno" if not self.ispop(serviceType) else "billNo"
        orderno = "_{}".format(data[orderid]) if orderid in data and data[orderid] else ''
        ps = (" 安维单号:{}" if serviceType != 1 else " 售后单号:{}").format(data[orderid])
        if 'expectAtHomeDate' in data:
            repairtime = data['expectAtHomeDate']
        elif 'reservationInstallTime' in data and data['reservationInstallTime']:
            repairtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(data['reservationInstallTime'] / 1000))
        else:
            repairtime = ''
        order_info = {
            'factorynumber': (data['orderId'] if 'orderId' in data else data['orderid']) + orderno,
            'ordername': self.getordername(data, serviceType),
            'username': data['customerName'],
            'mobile': mobile,
            'originname': '京东系统',
            'orderstatus': data['orderStatusName'] if 'orderStatusName' in data else statusTypes["5"],
            'machinetype': data['productTypeName'] if 'productTypeName' in data else data['productCategoryName'],
            'machinebrand': brand,
            'version': data['productName'],
            'sn': data['wareId'] if 'wareId' in data else data['productSku'],
            'companyid': self.factoryid,
            'adminid': self.adminid,
            'address': address,
            'province': data['serviceProvince'] if 'serviceProvince' in data else data['provinceName'],
            'city': data['serviceCity'] if 'serviceCity' in data else data['cityName'],
            'county': data['serviceCounty'] if 'serviceCounty' in data else data['districtName'],
            'town': data['serviceDistrict'] if 'serviceDistrict' in data else data['streetName'],
            'ordertime': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(data[createTimeKey] / 1000)),
            'repairtime': repairtime,
            'note': str(data['feedbackNote'] if 'feedbackNote' in data else data['saleFrom']) + str(
                data['exceptionFeeApprovalStatusName'] if 'exceptionFeeApprovalStatusName' in data else ''),
            'description': str(data['feedbackResult'] if 'feedbackResult' in data else data['reservationFailReason'])
                           + ps,
            'ordernoSecret': data['ordernoSecret'] if 'ordernoSecret' in data else data['businessNo']
        }
        order_info = JDUtil.clearAddress(order_info)
        # Portal orders hide the phone number behind a detail page.
        if not self.ispop(serviceType):
            order_info = self.getUserInfo(order_info)
        yield order_info

    def parseUserMobile(self, data, url, referer):
        """Read the real customer phone number from the inner detail frame."""
        header = self.headers.copy()
        header['Referer'] = referer
        response = self.session.get(url, headers=header)
        if response.status_code != 200:
            return data
        bsObj = self.getsoup(response)
        tr = bsObj.find("form", {"id": "searchForm"}).find("tbody").find("tr")
        data['mobile'] = tr.find("input", {"name": "customerPhone"})["value"]
        return data

    def getUserInfo(self, data):
        """Follow the order's manage page to its inner iframe and fill in the
        customer's phone number; returns data unchanged on any failure."""
        if not data or "ordernoSecret" not in data:
            return data
        userurl = self.baseurl + "/receipt/manage?orderno=" + data['ordernoSecret']
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        response = self.session.get(userurl, headers=self.headers)
        if response.status_code != 200:
            return data
        bsObj = self.getsoup(response)
        iframe = bsObj.find("iframe", {"id": "innerframe"})
        if iframe:
            url = self.baseurl + str(iframe['src'])
            return self.parseUserMobile(data, url, userurl)
        return data


if __name__ == '__main__':
    util = JDUtil(adminid='24', factoryid='19')
    # util = JDUtil(adminid='69046', factoryid='19')
    print(util.loadMain())
/MIUtil.py
"""Xiaomi (XMS) after-sales order grabber.

Reuses login cookies from a local Chrome profile, queries recent service
orders through the XMS JSON API, enriches each order via detail / privacy /
description look-ups, and pushes the result to the bangjia backend.
"""
import datetime
import json
import re
import time
from urllib import parse
from urllib.parse import urlparse

import requests

# from requests_html import HTMLSession
# from utils.ChromeCookie import fetch_chrome_cookie
from BaseUtil import BaseUtil
from cookie_test import fetch_chrome_cookie


class MIUtil(BaseUtil):
    """Order crawler for https://xms.be.xiaomi.com driven by Chrome cookies."""

    def __init__(self, adminid='68891', factoryid='17', baseurl='https://xms.be.xiaomi.com',
                 bjdomain='http://yxgtest.bangjia.me'):
        # No username/password — authentication rides entirely on browser cookies.
        super(MIUtil, self).__init__('', '', adminid, factoryid, baseurl, bjdomain)
        parsed_uri = urlparse(baseurl)
        self.host = parsed_uri.netloc
        self.baseurl = baseurl
        self.adminid = adminid
        self.factoryid = factoryid
        self.bjdomain = bjdomain
        self.mainurl = self.baseurl + '/admin/page!main.action'
        self.searchurl = self.baseurl + '/afterservice/afterservice!api.action'
        # Pull the XMS session cookies from the local Chrome profile.
        self.cookie = fetch_chrome_cookie(
            [{"domain": ".xiaomi.com", "fields": ['uLocale', 'cUserId', 'userId', 'xmsbe_slh', "xst"]},
             {"domain": ".be.xiaomi.com", "fields": ["xst"]},
             {"domain": "xms.be.xiaomi.com"},
             {"domain": ".xms.be.xiaomi.com"},
             # {"domain": ".account.xiaomi.com"},
             # {"domain": ".mi.com"}
             ])
        # print(self.cookie)
        self.cookies = MIUtil.getCookies(self.cookie)
        self.session = requests.Session()
        # self.session = HTMLSession()
        # self.agent = random.choice(agents)
        self.agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
                     'Chrome/81.0.4044.113 Safari/537.36'
        # Status dicts returned to the caller of loadMain()/loadOrders().
        self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''}
        self.datafail = {'code': 0, 'msg': '抓单失败,请使用谷歌浏览器登录小米账号后重试'}
        self.dataverify = {'code': 2, 'msg': '登录过期,请重新登录', 'element': ''}
        self.headers = {'content-type': 'application/x-www-form-urlencoded; charset=UTF-8', 'User-Agent': self.agent,
                        'Upgrade-Insecure-Requests': '1', 'Host': self.host, 'Origin': self.baseurl,
                        'Accept-Encoding': 'gzip, deflate, br', 'Cookie': self.initCookie(self.cookies),
                        'Accept-Language': 'zh-CN,zh;q=0.9', 'Connection': 'keep-alive',
                        'Accept': 'application/json, text/javascript, */*; q=0.01'}

    def initCookie(self, cookies=None):
        """Serialize a cookie dict into a ``k=v; k=v`` Cookie header value."""
        if not cookies:
            return ""
        result = ""
        for cookie in cookies:
            result += cookie + "=" + cookies[cookie] + "; "
        return result[:-2]  # drop the trailing "; "

    def loadMain(self):
        """Entry point: scrape the org id from the service-list page, then load orders."""
        if 'userId' not in self.cookies:  # no Chrome session — cannot proceed
            return self.datafail
        # searchurl = self.searchurl + "?router=service_list"
        # data = "method=srvServicing.getJurisdictionOrg&params=" + self.cookies['userId']
        # print(data)
        self.headers['Referer'] = self.mainurl + "?"
        # print(self.headers['Cookie'])
        # print("***********************************")
        headers = self.headers.copy()
        headers[
            'Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        response = self.session.get(self.searchurl + "?router=service_list", headers=headers)
        response.encoding = 'utf-8'
        # print(response.headers['Set-Cookie'])
        # orgIds = re.findall(r"var orgId = \"(.+?)\"", response.text, re.S)
        # datas = json.loads(response.text)
        # print(response.text)
        # The org id is embedded in inline JavaScript on the page.
        result = re.findall(re.compile(r"originOrgId: ['](.*?)[']", re.S), response.text)
        if not result or len(result) == 0:
            return self.datafail
        orgId = result[0]
        # originOrgId = re.findall(r"originOrgId: '(.+?)',", response.text, re.S)[0]
        originOrgId = orgId
        # print(originOrgId)
        return self.loadOrders({'orgId': orgId, "originOrgId": originOrgId})

    def loadOrders(self, param=None):
        """Search orders created in the last 3 days and push them to the backend.

        :param param: dict with ``orgId`` and ``originOrgId`` (from loadMain)
        :returns: one of the status dicts (datasuccess/datafail)
        """
        self.headers['Referer'] = self.searchurl
        # print(self.headers['Cookie'])
        # print("===============")
        startTime = (datetime.date.today() + datetime.timedelta(days=-3)).strftime("%Y-%m-%d")
        endTime = (datetime.date.today() + datetime.timedelta(days=+1)).strftime("%Y-%m-%d")
        params = {"key": "", "miliao": "", "curOperator": self.cookies['userId'],
                  "originOrgId": param['originOrgId'], "orgId": param['orgId'], "sId": "", "tel": "",
                  "imei": "", "sn": "", "orderId": "", "createStartTime": startTime, "createEndTime": endTime,
                  "signStartTime": "", "signEndTime": "", "closeStartTime": "", "closeEndTime": "",
                  "returnStartTime": "", "returnEndTime": "",
                  "fullStartTime": startTime, "fullEndTime": endTime,
                  "pageInfo": {"pageNum": 1, "pageSize": 50}}
        data = {'method': 'srvServicing.searchList', 'params': json.dumps(params)}
        response = self.session.post(self.searchurl, data=parse.urlencode(data), headers=self.headers)
        response.encoding = 'utf-8'
        # print("===================================loadOrders")
        # print(response.text)
        datas = json.loads(response.text)
        # print(datas['result']['pageInfo']['total'])
        if datas['code'] == 1:
            try:
                # Materialize the whole generator chain before posting.
                data = {"data": json.dumps(list(self.parseOrders(datas)))}
                # print("data=", data)
                requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
            except Exception as e:
                print(str(e))
                return self.datafail
            return self.datasuccess
        return self.datafail

    def parseOrders(self, datas):
        """Yield enriched order dicts for every row in the search result."""
        total_num = datas['result']['pageInfo']['total']
        # print("total count:{}".format(total_num))
        for order_key in datas['result']['srvInfos']:
            # flag = 0
            # for key in order_list:
            #     if (order_list[key]['factorynumber'] == order_key['sId']):
            #         order_list[key]['sn'] = order_list[key]['sn'] + "," + order_key['sns']
            #         flag = 1
            #         break
            # if flag == 1:
            #     continue
            order_info = {'factorynumber': order_key['sId'], 'ordername': order_key['typeDesc'],
                          'username': order_key['customerName'], 'mobile': order_key['customerTel'],
                          'orderstatus': order_key['statusDesc'],
                          'machinetype': order_key['goodsNames'].replace("小米", ''),
                          'sn': order_key['sns'], 'companyid': self.factoryid, 'machinebrand': '小米',
                          'originname': '小米系统', 'adminid': self.adminid}
            yield from self.getDetail(order_info, order_key)

    # Detail-query endpoint: fills address/ordertime/repairtime/note, then
    # chains into showMsg() for the privacy-protected contact fields.
    def getDetail(self, order, datas):
        """Fetch BASEINFO detail for one order and continue the enrichment chain."""
        self.headers['Referer'] = self.mainurl
        # params is pre-URL-encoded JSON: {"sId":"<id>","conditions":"BASEINFO"}
        post_data = "method=srvServicing.getCommonSrvDetail&params=%7B%22sId%22%3A%22" + datas['sId'] + \
                    "%22%2C%22conditions%22%3A%22BASEINFO%22%7D"
        response = self.session.post(self.searchurl, data=post_data, headers=self.headers)
        response.encoding = 'utf-8'
        json_ret2 = json.loads(response.text)
        # print("===================================getDetail result")
        # print(response.text)
        if json_ret2['code'] == 1:
            datas['addressDescC'] = json_ret2['result']['baseInformation']['addressDescC']
            order['address'] = json_ret2['result']['baseInformation']['addressDesc']
            # applyTime is epoch milliseconds.
            timeArray = time.localtime(json_ret2['result']['baseInformation']['applyTime'] / 1000)
            otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
            order['ordertime'] = otherStyleTime
            if json_ret2['result']['baseInformation']['hopeVisitTime']:
                order['repairtime'] = json_ret2['result']['baseInformation']['hopeVisitTime']
            createFrom = json_ret2['result']['baseInformation']['createFrom']
            # Prepaid orders ("预付费") carry the price in the first item's extendContent.
            if createFrom.find("预付费") != -1 and createFrom != '':
                order['note'] = createFrom
                if len(json_ret2['result']['baseInformation']['items']) > 0:
                    priceitem = json.loads(json_ret2['result']['baseInformation']['items'][0]['extendContent'])
                    order['note'] = order['note'] + str(priceitem['price'])
            yield from self.showMsg(order, datas)

    def showMsg(self, order, datas):
        """Reveal the privacy-masked name/phone/address via the private-log endpoint."""
        show_url = self.baseurl + '/common/common!savePrivateLogOperate.action'
        post_data = {"content": json.dumps({"miliao": [], "name": [datas['customerNameC']],
                                            "tel": [datas['customerTelC']], "email": [],
                                            "address": [datas['addressDescC']],
                                            "operateKey": datas['sId']})}
        response = self.session.post(show_url, data=post_data, headers=self.headers)
        response.encoding = 'utf-8'
        json_msg = json.loads(response.text)
        # print("===================================showMsg result")
        # print(response.text)
        if 'result' in json_msg:
            order['username'] = json_msg['result']['name'][0]
            order['mobile'] = json_msg['result']['tel'][0]
            order['address'] = json_msg['result']['address'][0]
        yield self.getDescription(order, datas)

    # Query the handling result / problem description for one order.
    def getDescription(self, order, datas):
        """Fill ``description`` and fault ``note`` from the service VO, return the order."""
        self.headers['Referer'] = self.searchurl + '?router=service_info_detail&sId=' + datas['sId']
        # params is pre-URL-encoded JSON: {"sId":"<id>","conditions":""}
        post_data = "method=srvServicing.getServiceVo&params=%7B%22sId%22%3A%22" + datas[
            'sId'] + "%22%2C%22conditions%22%3A%22%22%7D"
        response = self.session.post(self.searchurl, data=post_data, headers=self.headers)
        response.encoding = 'utf-8'
        json_ret3 = json.loads(response.text)
        if json_ret3['code'] == 1:
            data = json_ret3['result']
            if data['customerDesc']:
                order['description'] = data['customerDesc']
            fault = ''
            if len(data['items']) > 0:
                for item in data['items'][0]['itemHasFaults']:
                    fault += item['faultName'] + ";"
                if data['items'][0]['faultDesc']:
                    fault += data['items'][0]['faultDesc'] + ";"
                if data['items'][0]['methods']:
                    fault += "处理方法:" + data['items'][0]['methods'][0]['name']
            if fault:
                order['note'] = fault
        return order


if __name__ == '__main__':
    # util = MIUtil('20845', factoryid='17')
    util = MIUtil('24', factoryid='17', bjdomain='http://yxgtest.bangjia.me')
    print(util.loadMain())
/MideaCookieUtil.py
"""Midea C-CSS order grabber (Chrome-cookie variant).

Authenticates by reusing cookies from a local Chrome session, iterates every
organisation the account belongs to, pulls recent service orders per org and
posts the merged list to the bangjia backend.
"""
import json
import time
from datetime import date, timedelta

import requests

from BaseUtil import BaseUtil
from cookie_test import fetch_chrome_cookie


class MideaUtil(BaseUtil):
    """Order crawler for https://cs.midea.com/c-css/ driven by browser cookies."""

    def __init__(self, username, passwd, adminid='24', factoryid='4', baseurl='https://cs.midea.com/c-css/',
                 bjdomain='http://yxgtest.bangjia.me'):
        super(MideaUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)
        # The C-CSS list API speaks JSON. (The original code first assigned
        # form-encoded defaults and immediately overwrote them; only the final
        # values are kept here.)
        self.headers['Accept'] = "*/*"
        self.headers['Content-Type'] = 'application/json'
        self.cookie = fetch_chrome_cookie([{"domain": ".midea.com"}], isExact=False)
        self.cookies = BaseUtil.getCookies(self.cookie)
        self.headers['Cookie'] = self.cookie
        print("init cookie=", self.cookie)

    def loadOrders(self, param=None):
        """Crawl all orgs' orders and push them to the backend.

        :returns: ``datasuccess`` on success, ``dataverify`` on any failure
                  (typically expired Chrome cookies).
        """
        try:
            data = {"data": json.dumps(self.loadRolesOrder())}
            print("loadOrders data=", data)
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except:
            return self.dataverify
        return self.datasuccess

    def loadRolesOrder(self):
        """Collect orders across every organisation of the logged-in user.

        :returns: list of order dicts merged/de-duplicated on ``factorynumber``;
                  the ``datafail`` status dict when the user-info response
                  indicates an invalid session (pre-existing behavior, kept).
        """
        roleurl = self.baseurl + "desktop/userInfo"
        self.headers['Referer'] = self.baseurl + "views/css/desktop/index.jsp"
        response = self.session.post(roleurl, headers=self.headers)
        print("userInfo result=", response.text)
        result = []
        if response.status_code == 200 and response.text:
            roleresult = self.getjson(response)
            if not roleresult or 'status' not in roleresult or not roleresult['status']:
                return self.datafail
            if 'content' not in roleresult or 'orgUsers' not in roleresult['content']:
                return self.datafail
            for org in roleresult['content']['orgUsers']:
                orgId = org['orgEntityVO']['orgCode']
                result = self.merge(result, self.switchOrg(orgId), "factorynumber")
        # BUG FIX: the collected list was never returned, so loadOrders()
        # always serialized None ("null") instead of the orders.
        return result

    def switchOrg(self, orgId):
        """Switch the server-side session to ``orgId`` and load that org's orders."""
        roleurl = self.baseurl + "switchOrg"
        self.headers['Referer'] = self.baseurl + "views/css/desktop/index.jsp"
        params = {"currentOrg": orgId, "loginToken": self.cookies['loginToken']}
        response = self.session.post(roleurl, headers=self.headers, data=params)
        # Touch the desktop home page so the org switch takes effect for the
        # list query that follows.
        response = self.session.get(self.baseurl + 'views/css/desktopPlugIn/wd_homePage.jsp', headers=self.headers)
        return list(self.loadPageOrder())

    def loadPageOrder(self, page=1, totalcount=100, pageSize=100):
        """Yield orders page by page for the current org, recursing to the last page.

        Searches orders whose CONTACT_TIME falls in the last 3 days.
        """
        dataurl = self.baseurl + "wom/serviceorderunit/listdata"
        data = {"page": page, "rows": pageSize, "pageIndex": page - 1, "pageSize": pageSize,
                "formConditions": {"SERVICE_ORDER_STATUS": "", "CONTAIN_EJFWS": "N",
                                   "CONTACT_TIME": (date.today() - timedelta(days=3)).strftime("%Y-%m-%d"),
                                   "CONTACT_TIME_end": (date.today()).strftime("%Y-%m-%d")}}
        response = self.session.post(dataurl, headers=self.headers, data=json.dumps(data))
        self.headers['Referer'] = self.baseurl + "wom/serviceorderunit/list?type=womServiceNotFinshCount"
        response.encoding = 'utf-8'
        print("loadOrders response={}".format(response.text))
        result = json.loads(response.text)
        if result and 'status' in result and result['status']:
            data = result['content']
            totalcount = data['total']
            pagecount = data['pageCount']
            pageSize = data['pageSize']
            page = data['pageIndex']
            # Always emit the current page; recurse while more pages remain.
            # (Flattened from an if/else whose branches both yielded the page.)
            yield from self.parseOrders(data)
            if page < pagecount:
                yield from self.loadPageOrder(page + 1, totalcount, pageSize)

    def parseOrders(self, data):
        """Yield one normalized order dict per row of a list-data response."""
        for item in data['rows']:
            yield {
                'factorynumber': item['SERVICE_ORDER_NO'],
                'ordername': item['SERVICE_SUB_TYPE_NAME'],
                'username': item['SERVICE_CUSTOMER_NAME'],
                'mobile': item['SERVICE_CUSTOMER_TEL1'],
                'orderstatus': item['SERVICE_ORDER_STATUS'],
                'originname': item['ORDER_ORIGIN'],
                'machinetype': item['PROD_NAME'],
                'machinebrand': item['BRAND_NAME'],
                'sn': '',
                'version': item['PRODUCT_MODEL'] if 'PRODUCT_MODEL' in item else '',
                'repairtime': item['FINAL_APPOINT_TIME'] if 'FINAL_APPOINT_TIME' in item else '',
                'mastername': item['ENGINEER_NAME'] if 'ENGINEER_NAME' in item else '',
                'note': item['PUB_REMARK'] if 'PUB_REMARK' in item else '',
                'companyid': self.factoryid,
                'adminid': self.adminid,
                'address': str(item['SERVICE_CUSTOMER_ADDRESS']),
                # 'province': item['provinceName'], 'city': item['cityName'],
                # 'county': item['regionName'], 'town': item['countyName'],
                'ordertime': item['CONTACT_TIME'],
                'description': item['SERVICE_DESC'],
            }


if __name__ == '__main__':
    # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')
    # bangjia:13819807915 美的:AW3306009461 Md123456789
    util = MideaUtil('AW3306009461', 'Md123456789!', adminid='24', factoryid='4')
    # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')
    print(util.loadOrders())
/MideaUtil.py
"""Midea C-CSS order grabber (username/password login variant).

Logs in with account credentials (optionally a captcha), then pages through
recent service orders and posts them to the bangjia backend.
"""
import json
import time
from datetime import date, timedelta

import requests

from BaseUtil import BaseUtil


class MideaUtil(BaseUtil):
    """Order crawler for https://cs.midea.com/c-css/ using form login."""

    def __init__(self, username, passwd, adminid='24', factoryid='4', baseurl='https://cs.midea.com/c-css/',
                 bjdomain='http://yxgtest.bangjia.me'):
        super(MideaUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        # Overrides the base status dict: code 2 asks the caller for a captcha.
        self.dataverify = {'code': 2, 'msg': '输入验证码', 'element': ''}

    def login(self, param=None):
        """Perform login; when ``param`` carries a captcha code, authenticate with it.

        :returns: the result of the authentication attempt — a status dict
                  (success/fail/captcha-needed).
        """
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        if not param:
            # Warm up the session on the login page before authenticating.
            loginurl = self.baseurl + "login"
            self.headers['Referer'] = loginurl
            response = self.session.get(loginurl, headers=self.headers)
            response.encoding = 'utf-8'
            print("login statuscode={}".format(response.status_code == 200))
            print("login response={}".format(response.text))
            if response.status_code == 200:
                result = self.loginauth()
            else:
                return self.getCaptcha()
        else:
            result = self.loginauth(param)
        print("login result={}".format(result))
        print("param={}".format(param))
        # BUG FIX: previously returned ``param`` (often None), so callers of
        # loadOrders() -> login() never saw the real status dict.
        return result

    def getCaptcha(self):
        """Return the captcha-required status dict with a cache-busted image URL."""
        self.dataverify['url'] = self.baseurl + "captcha?r={}".format(round(time.time() * 1000))
        return self.dataverify

    def loginauth(self, param=None):
        """POST credentials (plus optional captcha ``code``) to the signin endpoint.

        On success, immediately triggers order loading.  NOTE(review): the
        password is sent as a fixed pre-hashed value and ``self.passwd`` is not
        used — presumably the server expects a client-side hash; confirm before
        reusing with other accounts.  Returns None when the HTTP status is not
        200 (pre-existing behavior, kept).
        """
        code = param['code'] if param and 'code' in param else param
        if not code:
            if not self.checkState():
                return self.getCaptcha()
            else:
                code = ''  # captcha not required for this account right now
        authurl = self.baseurl + "signin"
        data = {"userAccount": self.username,
                "userPassword": "6d904a32d4dbf2db15336eadca0d4802edfe2f85c0da02a32bff93b70c8d0b2c7181fd58c434c7838dd2b234feda762fbca546967a5ea7568958f55bc7966dd1",
                "captcha": code, "domainType": "CS"}
        print("loginauth data={}".format(data))
        response = self.session.post(authurl, headers=self.headers, data=data)
        self.headers['Referer'] = authurl
        response.encoding = 'utf-8'
        print("loginauth result={}".format(response.text))
        if response.status_code == 200:
            result = json.loads(response.text)
            if result and 'status' in result and result['status']:
                return self.loadOrders(True)
            return self.datafail

    def checkState(self):
        """Return True when the server says no captcha is needed for this account."""
        checkurl = self.baseurl + "captchaState"
        data = {"userAccount": self.username}
        response = self.session.post(checkurl, headers=self.headers, data=data)
        response.encoding = 'utf-8'
        result = False
        print("checkstate response={}".format(response.text))
        if response.status_code == 200:
            state = json.loads(response.text)
            # content == falsy means "no captcha required".
            if state and 'content' in state and not state['content']:
                result = True
            else:
                result = False
        print("checkstate result={}".format(result))
        return result

    def isLogin(self):
        """Heuristic session check: the desktop page serves a <script> redirect when logged out."""
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        mainurl = self.baseurl + "views/css/desktop/index.jsp"
        print(mainurl)
        response = self.session.get(mainurl, headers=self.headers)
        response.encoding = 'utf-8'
        print("loadOrders response={}".format(response.text))
        if response.status_code == 200 and not response.text.startswith("<script>"):
            return True
        return False

    def loadOrders(self, param=None):
        """Load recent orders and push them to the backend; log in first if needed.

        :param param: truthy to skip the login check (used re-entrantly by loginauth)
        """
        if not param and not self.isLogin():
            return self.login()
        try:
            data = {"data": json.dumps(list(self.loadPageOrder()))}
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except:
            return self.datafail
        return self.datasuccess

    def loadPageOrder(self, page=1, totalcount=100, pageSize=100):
        """Yield orders page by page (last 7 days), recursing until the final page."""
        self.headers['Accept'] = "*/*"
        self.headers['Content-Type'] = 'application/json'
        dataurl = self.baseurl + "womflow/serviceorderunit/listdata"
        data = {"page": page, "rows": pageSize, "pageIndex": page - 1, "pageSize": pageSize,
                "formConditions": {"SERVICE_ORDER_STATUS": "",
                                   "CONTACT_TIME": (date.today() - timedelta(days=7)).strftime("%Y-%m-%d"),
                                   "CONTACT_TIME_end": (date.today()).strftime("%Y-%m-%d")}}
        response = self.session.post(dataurl, headers=self.headers, data=json.dumps(data))
        self.headers['Referer'] = self.baseurl + "womflow/serviceorderunit/list?type=womServiceNotFinshCount"
        response.encoding = 'utf-8'
        print("loadOrders response={}".format(response.text))
        result = json.loads(response.text)
        if result and 'status' in result and result['status']:
            data = result['content']
            totalcount = data['total']
            pagecount = data['pageCount']
            pageSize = data['pageSize']
            page = data['pageIndex']
            print("totalcount={} pagecount={} pageSize={} page={}".format(totalcount, pagecount, pageSize, page))
            # Always emit the current page; recurse while more pages remain.
            # (Flattened from an if/else whose branches both yielded the page.)
            yield from self.parseOrders(data)
            if page < pagecount:
                yield from self.loadPageOrder(page + 1, totalcount, pageSize)

    def parseOrders(self, data):
        """Yield one normalized order dict per row of a list-data response."""
        for item in data['rows']:
            yield {
                'factorynumber': item['SERVICE_ORDER_NO'],
                'ordername': item['SERVICE_SUB_TYPE_NAME'],
                'username': item['SERVICE_CUSTOMER_NAME'],
                'mobile': item['SERVICE_CUSTOMER_TEL1'],
                'orderstatus': item['SERVICE_ORDER_STATUS'],
                'originname': item['ORDER_ORIGIN'],
                'machinetype': item['PROD_NAME'],
                'machinebrand': item['BRAND_NAME'],
                'sn': '',
                'version': item['PRODUCT_MODEL'] if 'PRODUCT_MODEL' in item else '',
                'repairtime': item['FINAL_APPOINT_TIME'] if 'FINAL_APPOINT_TIME' in item else '',
                'mastername': item['ENGINEER_NAME'] if 'ENGINEER_NAME' in item else '',
                'note': item['PUB_REMARK'] if 'PUB_REMARK' in item else '',
                'companyid': self.factoryid,
                'adminid': self.adminid,
                'address': str(item['SERVICE_CUSTOMER_ADDRESS']),
                # 'province': item['provinceName'], 'city': item['cityName'],
                # 'county': item['regionName'], 'town': item['countyName'],
                'ordertime': item['CONTACT_TIME'],
                'description': item['SERVICE_DESC'],
            }


if __name__ == '__main__':
    # util = ConkaUtil('K608475', 'Kuser6646!', adminid='20699', factoryid='1')
    util = MideaUtil('AW3306009461', 'Md123456789!', adminid='24', factoryid='4')
    # util = MideaUtil('Aw3302060387', 'Jj62721262', adminid='24', factoryid='4')
    # util = ConkaUtil('K608069', 'Crm@20200401', adminid='24', factoryid='1')
    print(util.loadOrders())
/SuningUtil.py
"""Suning ASES order grabber.

Reuses Chrome cookies for ases.suning.com, scrapes the Tianyan BI
visit-tracking report (HTML tables) for regular service orders, pulls
"reassigned worker" orders from a JSON endpoint, and posts everything to the
bangjia backend.
"""
import json
import re
import time
from datetime import date, timedelta
from urllib import parse
from urllib.parse import urlencode, urlparse

import requests

from BaseUtil import BaseUtil
from cookie_test import fetch_chrome_cookie


class SuningUtil(BaseUtil):
    """Order crawler for http://ases.suning.com driven by browser cookies."""

    def __init__(self, username, passwd, adminid='24', factoryid='4', baseurl='http://ases.suning.com',
                 bjdomain='http://yxgtest.bangjia.me'):
        super(SuningUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        # self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        self.headers['Accept-Encoding'] = 'gzip, deflate'
        self.headers['Accept-Language'] = 'zh-CN,zh;q=0.9'
        self.cookie = fetch_chrome_cookie([
            {"domain": "ases.suning.com"},
            {"domain": ".ases.suning.com"},
            {"domain": ".suning.com"},
            {"domain": "tianyan.suning.com"},
        ], isExact=True)
        self.cookies = BaseUtil.getCookies(self.cookie)
        self.headers['Cookie'] = self.cookie
        # print(self.cookie)
        # Cached result of getUserinfo(); populated lazily by loadOrders().
        self.userinfo = None

    def loadBI(self, param=None):
        """Follow the BI dashboard redirect; return the Tianyan report URL.

        A successful SSO redirect carries a ``guId`` query parameter — its
        absence means the session is invalid, so None is returned.
        """
        # print("===================loadBI")
        loginurl = self.baseurl + "/ases-web/main/homeServiceOrders/biSmgzbb.action"
        header = self.headers.copy()
        # These two headers break the redirect chain — send the request without them.
        del header['Content-Type']
        del header['Origin']
        loginRes = self.session.get(loginurl, headers=header)
        url = loginRes.url
        print(url)
        return url if "guId" in url else None

    def loadMenu(self, param=None):
        """Fetch the ASES menu tree (debug/diagnostic only; result unused)."""
        # print("===================loadMenu")
        loginurl = self.baseurl + "/ases-web/main/menu/queryMenu.action?pId=FUN_18_02"
        self.headers['Accept'] = 'application/json, text/plain, */*'
        self.headers['Referer'] = self.baseurl + '/ases-web/index.html'
        menuRes = self.session.get(loginurl, headers=self.headers)
        # print(menuRes.headers)
        # FUN_18_02_33 BI FUN_18_02_04:改派工人管理
        # print(menuRes.text)

    def getUserinfo(self, param=None):
        """Fetch the logged-in user's profile; return the subset used for searches.

        :returns: dict with ``wd`` (service-point codes), ``supplierCode``,
                  ``userId`` and the first ``companyCode``, or None when the
                  session is invalid.
        """
        # self.loadMenu()
        print("===================getUserinfo")
        loginurl = self.baseurl + "/ases-web/main/user/userInfo.action"
        self.headers['Accept'] = 'application/json, text/plain, */*'
        self.headers['Referer'] = self.baseurl + '/ases-web/index.html'
        print("headers=", self.headers)
        userRes = self.session.get(loginurl, headers=self.headers)
        print("userRes=", userRes.text)
        userinfo = self.getjson(userRes)
        print(userinfo)
        if userinfo and userinfo['result'] and userinfo['data']:
            wd = userinfo['data']['wd']
            supplierCode = userinfo['data']['supplierCode']
            userId = userinfo['data']['userId']
            companyCode = userinfo['data']['companyCode'][0]
            result = {"wd": wd, "supplierCode": supplierCode, "userId": userId, "companyCode": companyCode}
            return result
        return None

    def loadOrders(self, param=None):
        """Main entry: scrape BI report orders, push them, then load reassigned orders.

        :returns: one of the status dicts (datasuccess/datafail/dataverify)
        """
        # print("=================================loadOrders")
        if not self.userinfo:
            self.userinfo = self.getUserinfo()
        if not self.userinfo:
            return self.datafail
        biurl = self.loadBI()
        if not biurl:
            return self.datafail
        # The BI report lives on a different host (tianyan) — build fresh
        # headers for that origin.
        parsed_uri = urlparse(biurl)
        tianyanbase = parsed_uri.scheme + "://" + parsed_uri.netloc
        url = tianyanbase + "/lbi-web-in/ww/visittrack/queryGrid.action"
        header = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': self.agent,
                  'Upgrade-Insecure-Requests': '1', 'Host': parsed_uri.netloc, 'Origin': tianyanbase,
                  'Cookie': self.cookie, 'Accept-Encoding': 'gzip, deflate', 'Connection': 'keep-alive',
                  'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html, */*; q=0.01'}
        # Visit the report page first to collect the tianyan session cookies.
        bires = self.session.get(biurl, headers=header)
        # print("bires=", bires.text)
        # print("bires header=", bires.headers)
        cookies = self.cookies.copy()
        for c in bires.cookies:
            cookies[c.name] = c.value
            # print(c.name, c.value)
        header['Referer'] = biurl
        header['Cookie'] = self.initCookie(cookies)
        orders = list(self.searchBI(url, header, 1))
        print("loadOrders result count=", len(orders))
        try:
            data = {"data": json.dumps(orders)}
            # print(data)
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
            self.loadGaipaiOrder()
        except:
            return self.dataverify
        return self.datasuccess

    def initCookie(self, cookies=None):
        """Serialize a cookie dict into a ``k=v; k=v`` Cookie header value."""
        if not cookies:
            return ""
        result = ""
        for cookie in cookies:
            result += cookie + "=" + cookies[cookie] + "; "
        return result[:-2]  # drop the trailing "; "

    def searchBI(self, url, header, page=1, totalcount=100):
        """Yield orders from the Tianyan BI grid, recursing through all pages.

        Searches reservations from yesterday to tomorrow across the listed SAP
        order types; results come back as an HTML table.
        """
        params = {"wd": self.userinfo['wd'][0], "companyCode": self.userinfo['companyCode'],
                  "reservationStartDate": (date.today() - timedelta(days=1)).strftime("%Y%m%d"),
                  "reservationEndDate": (date.today() + timedelta(days=1)).strftime("%Y%m%d"),
                  "sapOrderType": "ZS01,ZS02,ZS03,ZS04,ZS06,ZS11,ZS12,ZS24",
                  "page": str(page), "pageSize": "10"
                  }
        # print("header['Cookie']=", header['Cookie'])
        biresult = self.session.post(url, headers=header, data=params)
        # print("url=", url, "biresult=", biresult.text)
        soup = self.getsoup(biresult)
        # Total count is embedded as digits inside the "total" span.
        totalRe = re.findall(re.compile(r"(\d+)", re.S), soup.find("span", {"class": "total"}).text.strip())
        if totalRe and len(totalRe) > 0:
            totalcount = totalRe[0]
        try:
            pageCount = int(soup.find("input", {"id": "pageCount"})['value'])
        except:
            pageCount = 0  # missing/unparsable page count — treat as last page
        resulttable = soup.find("table", {"class": "webtable"})
        isall = page + 1 > pageCount
        print("totalcount=", totalcount, "pageCount=", pageCount, "page=", page, "isall=", isall)
        if resulttable:
            yield from self.parseOrders2(resulttable.find_all("tr"), header['Referer'])
        if not isall:
            yield from self.searchBI(url, header, page + 1, totalcount)

    def parseOrders2(self, tr_list, biurl):
        """Yield parsed orders from table rows, skipping header rows (which carry a class)."""
        for tr in tr_list:
            if tr.has_attr('class'):
                continue
            order = self.parseOrder(tr)
            if order:
                yield self.orderdetail(order, biurl)

    def parseOrder(self, tr):
        """Build an order dict from one BI table row; None on any parse failure.

        Column meanings are positional — NOTE(review): indices were derived
        from the live report layout and will break if Suning reorders columns.
        """
        tablecolumns = tr.find_all("td")
        try:
            orderno_td = tablecolumns[0]
            addr = tablecolumns[14].text.strip().split(";")  # e.g. 0;安徽省;六安市;****
            orderitem = orderno_td.find("a")
            if orderitem:
                # "oid" is the element's onclick event id; the detail page needs it.
                data = {
                    "oid": re.findall(re.compile(r"[(]'(.*?)'[)]", re.S), orderitem["onclick"])[0],
                    'factorynumber': self.finda(orderno_td),
                    'originname': tablecolumns[16].text.strip(),
                    'username': tablecolumns[13].text.strip(),
                    'mobile': tablecolumns[15].text.strip(),
                    'ordername': tablecolumns[2].text.strip().replace("服务订单", ""),
                    'ordertime': tablecolumns[6].text.strip(),
                    'mastername': tablecolumns[23].text.strip(),
                    'province': addr[1] if len(addr) > 1 else "",
                    'city': addr[2] if len(addr) > 2 else "",
                    'companyid': self.factoryid,
                    # Brand text looks like "品牌(注释)" with a fullwidth paren — keep the part before it.
                    'machinebrand': tablecolumns[9].text.strip().split("(")[0],
                    'machinetype': tablecolumns[8].text.strip(),
                    'version': tablecolumns[7].text.strip(),
                    # 'machinebrand': re.findall(re.compile(r"(.*?)[(].*?[)]", re.S), tablecolumns[9].text.strip())[0],
                    'orderstatus': tablecolumns[4].text.strip(),
                    'adminid': self.adminid}
                print("parseorder data=", data)
                return data  # if self.isNew(data, self.bjdomain, self.adminid) else None
        except Exception as e:
            print("parseorder exception", e)
            return None

    def orderdetail(self, data, biurl):
        """获取到的是aes加密后的数据,暂未找到破解方法"""
        # (Detail endpoint returns AES-encrypted data; no decryption method
        # found yet, so the order is passed through unchanged.)
        # url = self.baseurl + "/ases-web/main/external/bi/changeShow.action?orderId=" + data['oid']
        # header = self.headers.copy()
        # header['Referer'] = biurl
        # detailRes = self.session.get(url, headers=header)
        # print("detailRes=", detailRes.text)
        # print("detail url=", detailRes.url)
        return data

    def loadGaipaiOrder(self):
        """Load "reassigned worker" (改派) orders from the JSON endpoint and push them."""
        self.headers['Accept'] = "application/json, text/plain, */*"
        self.headers['Content-Type'] = 'application/json'
        url = self.baseurl + "/ases-web/main/ui/dispatchWorker/queryList.action"
        params = {"wds": self.userinfo['wd'], "companyCode": self.userinfo['companyCode'],
                  "srvTimeStart": (date.today() - timedelta(days=3)).strftime("%Y-%m-%d"),
                  "srvTimeEnd": (date.today() + timedelta(days=3)).strftime("%Y-%m-%d"),
                  "page": "1", "pageSize": "100"
                  }
        url = url + "?" + str(parse.urlencode(params))
        orderRes = self.session.get(url, headers=self.headers)
        gaipaiOrder = self.parseOrders(orderRes)
        """以下获取的es数据也为加密后的数据"""
        # (The ES endpoint below also returns encrypted data, hence disabled.)
        # print("orderRes.text=", orderRes.text)
        # esurl = self.baseurl + "/ases-web/main/ui/smOrder/queryListFromES.action"
        # self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
        # self.headers['Accept-Encoding'] = 'gzip, deflate'
        # self.headers['Accept'] = 'application/json, text/plain, */*'
        # params = {"wd": self.userinfo['wd'][0], "companyCode": self.userinfo['companyCode'],
        #           "srvSaleCountStart": (date.today() - timedelta(days=3)).strftime("%Y-%m-%d"),
        #           "srvSaleCountEnd": (date.today() + timedelta(days=3)).strftime("%Y-%m-%d"),
        #           "createTimeStart": "", "createTimeEnd": "", "finishTimeStart": "", "finishTimeEnd": "",
        #           "orderId": "", "cmmdtyCtgry": "", "cityCodes": "", "mobPhoneNum": "",
        #           "page": "1", "pageSize": "100"
        #           }
        # print("esorder params=", params)
        # orderRes = self.session.post(esurl, headers=self.headers, data=params)
        # print("esorder orderRes.text=", orderRes.text)
        # ESOrder = self.parseOrders(orderRes)
        ESOrder = []
        try:
            data = {"data": json.dumps(gaipaiOrder + ESOrder)}
            # print(data)
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except:
            return self.dataverify
        return self.datasuccess

    def parseOrders(self, orderRes):
        """Parse the reassigned-order JSON response into normalized order dicts."""
        datas = self.getjson(orderRes)
        orders = []
        if datas and 'result' in datas and datas['result'] and datas['data']:
            items = datas['data']['datas']
        else:
            return orders
        for item in items:
            orders.append({
                'factorynumber': item['orderId'],
                'ordername': item['operateItemDec'],
                'username': item['consignee'],
                'mobile': item['mobPhoneNum'],
                'orderstatus': "改派工人",
                'originname': "苏宁",
                # 'machinetype': item['PROD_NAME'], 'machinebrand': item['BRAND_NAME'],
                'sn': item['cmmdtyCode'],
                'version': item['cmmdtyName'] if 'cmmdtyName' in item else '',
                'repairtime': item['srvTime'] if 'srvTime' in item else '',
                'mastername': item['zyry1BpName'] if 'zyry1BpName' in item else '',
                'note': item['srvMemo'] if 'srvMemo' in item else '',
                'companyid': self.factoryid,
                'adminid': self.adminid,
                'address': str(item['srvAddress']).replace(";", "").strip(),
                # 'province': item['provinceName'], 'city': item['cityName'],
                # 'county': item['regionName'], 'town': item['countyName'],
                'description': str(item['orderType']) + self.parseOrderType(item['orderType']),
            })
        return orders

    def parseOrderType(self, ordertype):
        """Map a SAP order-type code (ZSxx) to its Chinese service-type label."""
        if ordertype == "ZS01":
            return "新机安装"
        elif ordertype == "ZS02":
            return "辅助安装"
        elif ordertype == "ZS03":
            return "移机"
        elif ordertype == "ZS04":
            return "退换货拆装"
        elif ordertype == "ZS06":
            return "上门维修"
        elif ordertype == "ZS09":
            return "用户送修检测"
        elif ordertype == "ZS10":
            return "用户送修维修"
        elif ordertype == "ZS11":
            return "上门鉴定"
        elif ordertype == "ZS12":
            return "清洗/保养"
        elif ordertype == "ZS24":
            return "家电回收"
        elif ordertype == "ZS30":
            return "家装"
        else:
            return "安装"


if __name__ == '__main__':
    util = SuningUtil('W850018433', 'sn789456', adminid='24', factoryid='99')
    print(util.loadOrders())
    # print(util.loadBI())
/TCSMCookieUtil.py
import io
import json
import re
import sys
import unicodedata
from datetime import date, timedelta
from urllib import parse

import chardet
import requests
from idna import unicode

from BaseUtil import BaseUtil
from cookie_test import fetch_chrome_cookie


class TCSMUtil(BaseUtil):
    """Order scraper for the koyoo.cn work-order system.

    There is no credential login here: the HTTP session reuses the cookies
    harvested from the local Chrome profile via fetch_chrome_cookie(), and
    islogin() probes a logged-in-only page to verify they are still valid.
    """

    def __init__(self, username, passwd, adminid='24', factoryid='6', baseurl='http://hk2.koyoo.cn/',
                 bjdomain='http://yxgtest.bangjia.me'):
        super(TCSMUtil, self).__init__(username, passwd, adminid, factoryid, baseurl, bjdomain)
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        # Reuse the Chrome login cookies for every *.koyoo.cn host.
        self.cookie = fetch_chrome_cookie([{"domain": ".koyoo.cn"}], isExact=False)
        self.cookies = BaseUtil.getCookies(self.cookie)
        self.headers['Cookie'] = self.cookie
        self.headers['Accept-Encoding'] = 'gzip, deflate'
        # <option> elements of the "skill" dropdown; filled by islogin().
        self.skills = []

    def login(self, param=None):
        # Login is cookie-based (see __init__); nothing to do here.
        pass

    def islogin(self):
        """Return True when the work-order handle page is reachable, i.e. the
        borrowed Chrome cookies are still valid.

        Side effect: caches the skill <option> list in self.skills, which
        loadOrderbySkill() iterates later.
        """
        self.headers['Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng," \
                                 "*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        self.headers['Referer'] = self.baseurl + 'index.php?m=index&f=index'
        url = self.baseurl + "index.php?m=workorder&f=handleIndex"
        response = self.session.get(url, headers=self.headers)
        bsObj = self.getsoup(response)
        skillselect = bsObj.find("select", {"id": "skill"})
        if skillselect:
            skills = skillselect.find_all('option')
            self.skills = skills
            return skills is not None
        return False

    def loadOrders(self, param=None):
        """Scrape the orders of every skill and push them to the bangjia backend.

        Returns one of the BaseUtil status dicts (dataverify / datafail /
        datasuccess).
        """
        if not self.islogin():
            print("loadOrders is not login")
            return self.dataverify
        self.headers['Accept'] = "application/json, text/javascript, */*; q=0.01"
        self.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
        try:
            data = {"data": json.dumps(self.loadOrderbySkill())}
            requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data)
        except Exception as e:
            print("loadOrders except:", e)
            return self.datafail
        return self.datasuccess

    def loadOrderbySkill(self):
        """Collect the orders of every cached skill into one flat list."""
        results = []
        for skill in self.skills:
            print("loadOrderbySkill skill={}".format(skill["value"]))
            results += list(self.loadPageOrder(skill["value"]))
        print("loadOrderbySkill results={}".format(results))
        return results

    def loadPageOrder(self, skill=4209, page=1, totalcount=100, pageSize=100):
        """Yield parsed orders for one skill, recursing page by page until all
        rows reported by the server have been consumed.

        The grid endpoint answers in GBK with values wrapped in <label> tags
        and a payload whose leading "{" gets lost once the labels are
        stripped; the string surgery below undoes both quirks before
        json.loads().
        """
        dataurl = self.baseurl + "index.php?m=workorder&f=gridIndex"
        data = {"page": page, "rows": pageSize, "skillId": skill, "listType": "handle",
                "optid": "e7317288bb6d4849eec6dbe010d5d34e",
                "0[name]": "skill", "0[value]": skill,
                "1[name]": "Q|t2.dealstate|in", "1[value]": "OS_100,OS_400,OS_700,SS_W_REMIND",
                "27[name]": "isSearch", "27[value]": 1,
                "10[name]": "Q|t2.createtime|egt", "10[value]": BaseUtil.getDateBefore(3),
                "11[name]": "Q|t2.createtime|elt", "11[value]": BaseUtil.getDateBefore(0),
                }
        self.headers['Referer'] = dataurl
        response = self.session.post(dataurl, headers=self.headers, data=parse.urlencode(data))
        response.encoding = 'gbk'
        resStr = response.text
        print("loadOrders response={}".format(resStr))
        resStr = re.sub(r'<label[^()]*?>', '', resStr)
        resStr = resStr.replace("<\\/label>", "")
        resStr = unicodedata.normalize('NFKD', resStr).encode('ascii', 'ignore').decode("utf-8", 'ignore')
        resStr = "{" + resStr
        print(resStr)
        if response.status_code == 200:
            result = json.loads(resStr)
            totalcount = result['total']
            yield from self.parseOrders(result)
            if page * pageSize < totalcount:
                # BUGFIX: the recursive call previously omitted `skill`, so
                # page+1 was passed as the skill id and every other argument
                # shifted one position to the left.
                yield from self.loadPageOrder(skill, page + 1, totalcount, pageSize)

    def parseOrders(self, data):
        """Map the raw grid rows onto the bangjia order schema, one dict per row."""
        for item in data['rows']:
            yield {
                'factorynumber': self.parseHtml(item['worksn']),
                'ordername': item['demandsmall'],
                'username': item['customername'],
                'mobile': item['customertel'],
                'orderstatus': item['dealstate'],
                'originname': item['srctype'],
                'machinetype': item['probcate_id'],
                'machinebrand': item['brand_id'],
                'version': item['PRODUCT_MODEL'] if 'PRODUCT_MODEL' in item else '',
                'repairtime': item['askdate'] + " " + (BaseUtil.getTimeStr(item['asktime'])),
                'mastername': item['enginename'] if 'enginename' in item else '',
                'note': item['processremark'],
                'companyid': self.factoryid,
                'adminid': self.adminid,
                'address': item['address'],
                'ordertime': item['createtime'],
                'description': item['clientrequirement'],
            }


if __name__ == '__main__':
    util = TCSMUtil('AW3306009461', 'Md123456789!', adminid='24', factoryid='4')
    print(util.loadOrders())
/Util.py
import json

import requests
from bs4 import BeautifulSoup


class Util(object):
    """Static helpers shared by the per-vendor order scrapers: BeautifulSoup
    extraction shortcuts, bangjia-backend API calls and address clean-up."""

    @staticmethod
    def get_value(element):
        # Return the "value" attribute of a form element tag.
        return element["value"]

    @staticmethod
    def get_selected(element):
        # Value of the selected <option> of a <select>; falls back to the
        # first <option>, then to ''.
        results = element.select('option[selected="selected"]')
        if results and len(results) > 0:
            return results[0]['value'] or ''
        option = element.find("option")
        if option:
            return option['value'] or ''
        return ''

    @staticmethod
    def getsoup(response):
        # Force UTF-8 before parsing; some endpoints mislabel their charset.
        response.encoding = 'utf-8'
        return BeautifulSoup(response.text, features="lxml")

    @staticmethod
    def finda(element):
        # Stripped text of the first <a> descendant.
        return element.find("a").text.strip()

    @staticmethod
    def findspan(element):
        # Stripped text of the first <span> descendant.
        return element.find("span").text.strip()

    @staticmethod
    def isNew(data, bjdomain, adminid):
        """Ask the bangjia backend whether data['factorynumber'] is unseen
        (True when the checkexist call answers ret == 0)."""
        res = requests.post(bjdomain + "/Api/Climborder/checkexist",
                            data={"orderno": data['factorynumber'], 'adminid': adminid})
        return Util.checkBjRes(res)

    @staticmethod
    def getAccount(bjdomain):
        """Fetch the first account of factory 10002 from the bangjia backend.

        Returns None on any HTTP failure, non-zero ret, missing element list
        or when no matching factory with accounts is found.
        """
        try:
            res = requests.post(bjdomain + "/Api/Climborder/newgetaccount", data={"mobile": "18205169014"})
            if res.status_code == 200 and res.text:
                result = json.loads(res.text)
                if 'ret' not in result or int(result['ret']) != 0 or 'element' not in result or not result['element']:
                    return None
                for factory in result['element']:
                    if 'factoryid' in factory and int(factory['factoryid']) == 10002 and len(factory['accounts']) > 0:
                        return factory['accounts'][0]
                else:
                    # for/else: no factory matched.
                    return None
        except Exception as e:
            print("getaccount failed:", e)
            return None
        return None

    @staticmethod
    def clearKey(data, datakey, destkey='address'):
        # Drop data[datakey] from the front of data[destkey] when it is a
        # prefix (e.g. remove the province name repeated at the start of the
        # address). Mutates and returns `data`.
        if datakey in data and data[destkey] and data[destkey].strip().startswith(data[datakey].strip()):
            data[destkey] = data[destkey].replace(data[datakey], '', 1).strip()
        return data

    @staticmethod
    def clearAddress(orderinfo, destkey='address'):
        """Strip the province/city/county/town prefixes from the address field."""
        if destkey not in orderinfo:
            return orderinfo
        orderinfo = Util.clearKey(orderinfo, "province", destkey)
        orderinfo = Util.clearKey(orderinfo, "city", destkey)
        orderinfo = Util.clearKey(orderinfo, "county", destkey)
        orderinfo = Util.clearKey(orderinfo, "town", destkey)
        return orderinfo

    @staticmethod
    def checkBjRes(response):
        # A bangjia API call succeeded when HTTP 200 and JSON body ret == 0.
        if response.status_code == 200 and response.text:
            result = json.loads(response.text)
            return 'ret' in result and int(result['ret']) == 0
        return False

    @staticmethod
    def getTableRow(bsObj, id, func, row_no=None, truncate=True):
        """@truncate: whether to strip the trailing character (separator).

        Apply `func` to the <td> list of one row (when `row_no` is a valid
        int index, negative indices allowed) or concatenate func() over every
        row that has more than two cells.
        """
        table = bsObj.find("table", {"id": id})
        if not table:
            return ""
        alltr = table.find("tbody").find_all("tr")
        result = ""
        if row_no is not None and isinstance(row_no, int):
            if (0 <= row_no < len(alltr)) or (row_no < 0 and len(alltr) >= -row_no):
                return func(alltr[row_no].find_all("td")) if alltr[row_no] else ""
        for tr in alltr:
            note_td = tr.find_all("td")
            if note_td and len(note_td) > 2:
                item = func(note_td)
                result = result + item
        if truncate and result and len(result) > 0:
            result = result[:-1]
        return result
/aesgcm.py
import os
import sys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import (
    Cipher, algorithms, modes
)

# Standard AES-GCM nonce length in bytes.
NONCE_BYTE_SIZE = 12


def encrypt(cipher, plaintext, nonce):
    """Encrypt `plaintext` with AES-GCM under `nonce`.

    Returns (cipher, ciphertext, nonce). NOTE(review): only update() is
    called, never finalize(), so no authentication tag is produced.
    """
    # The GCM mode is swapped in-place on the shared Cipher from get_cipher().
    cipher.mode = modes.GCM(nonce)
    encryptor = cipher.encryptor()
    ciphertext = encryptor.update(plaintext)
    return (cipher, ciphertext, nonce)


def decrypt(cipher, ciphertext, nonce):
    """Decrypt AES-GCM `ciphertext`.

    NOTE(review): finalize() is never called, so the authentication tag is
    NOT verified — callers get the plaintext even for tampered input.
    """
    cipher.mode = modes.GCM(nonce)
    decryptor = cipher.decryptor()
    return decryptor.update(ciphertext)


def get_cipher(key):
    """Build a reusable AES Cipher with no mode set; encrypt()/decrypt()
    assign a per-call GCM mode before use."""
    cipher = Cipher(
        algorithms.AES(key),
        None,
        backend=default_backend()
    )
    return cipher
/asdfsd.py
# Load the required modules.
import wx
import os
import pygame
from pygame.locals import *
import itertools
import random


class Example(wx.Frame):
    """Small wxPython login dialog with a pygame-rendered picture captcha."""

    def __init__(self, parent, title):
        # Initialise the parent wx.Frame with a fixed 320x220 window.
        super(Example, self).__init__(parent, title=title, size=(320, 220))
        self.InitUI()
        self.Centre()
        self.Show()

    def generate_picture(self):
        """Render a fresh 4-character captcha image and save it as ./val.png."""
        pygame.init()
        font = pygame.font.SysFont('consolas', 64)
        # Pool of A-Z, a-z and 0-9; shuffle and keep the first four characters.
        chr_num_lst = list(itertools.chain([chr(ord('A') + _) for _ in range(26)],
                                           [chr(ord('a') + _) for _ in range(26)],
                                           [str(_) for _ in range(10)]))
        random.shuffle(chr_num_lst)
        self.val_text = chr_num_lst[0] + chr_num_lst[1] + chr_num_lst[2] + chr_num_lst[3]
        # Blue text on a red background.
        ftext = font.render(self.val_text, True, (0, 0, 255), (255, 0, 0))
        pygame.image.save(ftext, r"%s/val.png" % os.getcwd())

    def InitUI(self):
        """Build the account/password/captcha form with a GridBagSizer."""
        self.generate_picture()
        panel = wx.Panel(self)
        sizer = wx.GridBagSizer(10, 20)  # 10px column gap, 20px row gap
        # Row 1: account label + text field spanning two columns.
        text = wx.StaticText(panel, label="账号")
        sizer.Add(text, pos=(0, 0), flag=wx.ALL, border=5)
        self.tc = wx.TextCtrl(panel)
        sizer.Add(self.tc, pos=(0, 1), span=(1, 2), flag=wx.EXPAND | wx.ALL, border=5)
        # Row 2: password label + masked text field.
        text1 = wx.StaticText(panel, label="密码")
        sizer.Add(text1, pos=(1, 0), flag=wx.ALL, border=5)
        tc1 = wx.TextCtrl(panel, style=wx.TE_PASSWORD)
        sizer.Add(tc1, pos=(1, 1), span=(1, 2), flag=wx.EXPAND | wx.ALL, border=5)
        # Row 3: captcha label, input field and captcha bitmap.
        text2 = wx.StaticText(panel, label="验证码")
        sizer.Add(text2, pos=(2, 0), flag=wx.ALL, border=5)
        self.tc2 = wx.TextCtrl(panel)
        sizer.Add(self.tc2, pos=(2, 1), flag=wx.ALL, border=5)
        image = wx.Image(r'%s/val.png' % os.getcwd(), wx.BITMAP_TYPE_PNG).Rescale(80, 25).ConvertToBitmap()
        self.bmp = wx.StaticBitmap(panel, -1, image)
        sizer.Add(self.bmp, pos=(2, 2), flag=wx.ALL, border=5)
        # Row 4: login button, wired to login_process.
        btn = wx.Button(panel, -1, "登录")
        sizer.Add(btn, pos=(3, 1), flag=wx.ALL, border=5)
        self.Bind(wx.EVT_BUTTON, self.login_process, btn)
        panel.SetSizerAndFit(sizer)

    def login_process(self, event):
        """Compare the typed captcha (case-insensitive) with the rendered one
        and show the login result in a message box."""
        self.input_val = self.tc2.GetValue()
        if self.input_val.lower() == self.val_text.lower():
            wx.MessageBox("登录成功!\n欢迎您,%s!" % self.tc.GetValue(), '登录结果', wx.OK | wx.ICON_INFORMATION)
        else:
            wx.MessageBox("登录失败!请重试!", '登录结果', wx.OK | wx.ICON_INFORMATION)
            # On failure: clear the input and show a fresh captcha.
            self.tc2.SetValue("")
            self.generate_picture()
            image = wx.Image(r'%s/val.png' % os.getcwd(), wx.BITMAP_TYPE_PNG).Rescale(80, 25).ConvertToBitmap()
            # NOTE(review): wx.BitmapFromImage is the classic-wx alias,
            # deprecated in Phoenix (wx.Bitmap(image)); kept for compatibility.
            self.bmp.SetBitmap(wx.BitmapFromImage(image))


def main():
    app = wx.App()
    Example(None, title='图片验证GUI')
    app.MainLoop()


if __name__ == '__main__':
    # BUGFIX: the module previously called main() unconditionally at import
    # time; guard the entry point so importing it no longer opens the GUI.
    main()
/chrome_cookies_old.py
import os import sqlite3 from collections import defaultdict # from win32.win32crypt import CryptUnprotectData ''' 实际使用场景请自行修改Cookies/cookies.sqlite位置,下面代码均为默认安装的位置,有些绿色版的文件夹位置以及老版本的渗透版火狐浏览器位置需要自行修改 ''' # # 获取chrome浏览器的cookies # def getcookiefromchrome(): # cookiepath = os.environ['LOCALAPPDATA'] + r"\Google\Chrome\User Data\Default\Cookies" # sql = "select host_key,name,encrypted_value from cookies" # with sqlite3.connect(cookiepath) as conn: # cu = conn.cursor() # select_cookie = (cu.execute(sql).fetchall()) # cookie_list = [] # for host_key, name, encrypted_value in select_cookie: # cookie = CryptUnprotectData(encrypted_value)[1].decode() # cookies = {host_key: name + ":" + cookie} # cookie_list.append(cookies) # d = defaultdict(list) # for cookie_item in cookie_list: # for key, value in cookie_item.items(): # d[key].append(value.strip()) # print(dict(d)) # # # getcookiefromchrome()
/cookie_test.py
import os
import sys
import sqlite3
import http.cookiejar as cookiejar
import json, base64

import requests

import aesgcm

# Only these four columns are read; the decrypt/to_epoch SQL functions
# registered in ChromeCookieJar are available but unused by this query.
sql = """
SELECT host_key, name, path,encrypted_value as value
FROM cookies
"""


def dpapi_decrypt(encrypted):
    """Decrypt a DPAPI blob with the current Windows user's key
    (crypt32!CryptUnprotectData). Raises OSError (WinError) on failure."""
    import ctypes
    import ctypes.wintypes

    class DATA_BLOB(ctypes.Structure):
        _fields_ = [('cbData', ctypes.wintypes.DWORD),
                    ('pbData', ctypes.POINTER(ctypes.c_char))]

    p = ctypes.create_string_buffer(encrypted, len(encrypted))
    blobin = DATA_BLOB(ctypes.sizeof(p), p)
    blobout = DATA_BLOB()
    retval = ctypes.windll.crypt32.CryptUnprotectData(
        ctypes.byref(blobin), None, None, None, None, 0, ctypes.byref(blobout))
    if not retval:
        raise ctypes.WinError()
    result = ctypes.string_at(blobout.pbData, blobout.cbData)
    ctypes.windll.kernel32.LocalFree(blobout.pbData)
    return result


def unix_decrypt(encrypted):
    """Decrypt a Linux Chrome cookie value (AES-CBC with the well-known
    'peanuts' key); returns None for values too short to carry a prefix."""
    if not encrypted or len(encrypted) <= 3:
        return None
    print("unix_decrypt encrypted={}".format(encrypted))
    if sys.platform.startswith('linux'):
        password = 'peanuts'.encode('utf8')
        iterations = 1
    else:
        raise NotImplementedError
    from Crypto.Cipher import AES
    from Crypto.Protocol.KDF import PBKDF2
    salt = b'saltysalt'
    iv = b' ' * 16
    length = 16
    key = PBKDF2(password, salt, length, iterations)
    cipher = AES.new(key, AES.MODE_CBC, IV=iv)
    # Skip the 3-byte version prefix ('v10'/'v11') before decrypting.
    decrypted = cipher.decrypt(encrypted[3:])
    print("unix_decrypt decrypted={}".format(decrypted))
    # Strip PKCS#7 padding: the last byte is the pad length.
    return decrypted[:-decrypted[-1]]


def get_key_from_local_state():
    """Read the base64-encoded, DPAPI-wrapped AES key from Chrome's
    "Local State" file (Windows only)."""
    jsn = None
    with open(os.path.join(os.environ['LOCALAPPDATA'],
                           r"Google\Chrome\User Data\Local State"),
              encoding='utf-8', mode="r") as f:
        jsn = json.loads(str(f.readline()))
    return jsn["os_crypt"]["encrypted_key"]


def aes_decrypt(encrypted_txt):
    """AES-GCM decrypt a Chrome 80+ cookie blob:
    b'v10' || 12-byte nonce || ciphertext+tag."""
    encoded_key = get_key_from_local_state()
    encrypted_key = base64.b64decode(encoded_key.encode())
    encrypted_key = encrypted_key[5:]  # drop the 'DPAPI' prefix
    key = dpapi_decrypt(encrypted_key)
    nonce = encrypted_txt[3:15]
    cipher = aesgcm.get_cipher(key)
    return aesgcm.decrypt(cipher, encrypted_txt[15:], nonce)


def chrome_decrypt(encrypted_txt):
    """Decrypt one Chrome cookie value; returns None on Windows failure."""
    if sys.platform == 'win32':
        try:
            if encrypted_txt[:4] == b'\x01\x00\x00\x00':
                # Legacy DPAPI-encrypted value (pre Chrome 80).
                decrypted_txt = dpapi_decrypt(encrypted_txt)
                return decrypted_txt.decode()
            elif encrypted_txt[:3] == b'v10':
                # Chrome 80+ AES-GCM; the last 16 bytes are the auth tag.
                decrypted_txt = aes_decrypt(encrypted_txt)
                return decrypted_txt[:-16].decode()
        except WindowsError:
            return None
    else:
        return unix_decrypt(encrypted_txt)


def to_epoch(chrome_ts):
    """Convert a Chrome timestamp (microseconds since 1601-01-01) into
    microseconds since the Unix epoch; None passes through as None.

    BUGFIX: the offset was written as ``11644473600 * 000 * 1000`` — the
    literal ``000`` is zero, so the whole function was a no-op.
    """
    if chrome_ts:
        return chrome_ts - 11644473600 * 1000 * 1000
    else:
        return None


class ChromeCookieJar(cookiejar.FileCookieJar):
    """FileCookieJar backed by Chrome's SQLite cookie store; decrypted
    cookies are collected as dicts in self.cookies."""

    def __init__(self, filename=None, delayload=False, policy=None):
        self.cookies = []
        if filename is None:
            if sys.platform == 'win32':
                # Other profiles: AppData\Local\Google\Chrome\User Data\Profile [n]\Cookies
                filename = os.path.join(
                    os.environ['USERPROFILE'],
                    r'AppData\Local\Google\Chrome\User Data\default\Cookies')
            elif sys.platform.startswith('linux'):
                filename = os.path.expanduser(
                    '~/.config/google-chrome/Default/Cookies')
                if not os.path.exists(filename):
                    filename = os.path.expanduser(
                        '~/.config/chromium/Default/Cookies')
                if not os.path.exists(filename):
                    filename = None
        cookiejar.FileCookieJar.__init__(self, filename, delayload, policy)

    def _really_load(self, f, filename, ignore_discard, ignore_expires):
        con = sqlite3.connect(filename)
        con.row_factory = sqlite3.Row
        con.create_function('decrypt', 1, chrome_decrypt)
        con.create_function('to_epoch', 1, to_epoch)
        cur = con.cursor()
        cur.execute(sql)
        for row in cur:
            if row['value'] is not None:
                name = row['name']
                value = chrome_decrypt(row['value'])
                host = row['host_key']
                path = row['path']
                cookie = {"name": name, "value": value, "host": host, "path": path}
                self.cookies.append(cookie)
        cur.close()


def isDesiredDomain(origin, dest, isExact=True):
    """Exact host match, or substring match when isExact is False."""
    if not isExact:
        return dest in origin
    else:
        return origin == dest


def existInDomain(domain, cookie, isExact=True):
    """Decide whether `cookie` matches one domain spec.

    A spec looks like {"domain": ..., "fields": [...], "filters": [...]}:
    `fields` whitelists cookie names, `filters` blacklists them.
    """
    if isDesiredDomain(cookie['host'], domain['domain'], isExact):
        if "fields" in domain and domain["fields"] and len(domain['fields']) > 0:
            for field in domain['fields']:
                if field == cookie['name']:
                    return True
            else:
                # NOTE(review): this for/else fires whenever the loop finishes
                # without a match, so a non-empty whitelist never rejects
                # anything — confirm whether that is the intended behavior.
                return True
        if "filters" in domain and domain["filters"] and len(domain['filters']) > 0:
            for filter_item in domain['filters']:
                if filter_item == cookie['name']:
                    return False
            return True
        else:
            return True
    return False


def existInArray(domains, cookie, isExact=True):
    """True when `cookie` matches any spec, or when no specs were given."""
    if not domains:
        return True
    for domain in domains:
        if existInDomain(domain, cookie, isExact):
            return True
    return False


def fetch_chrome_cookie(domains=None, isExact=True):
    """Build a "name=value; ..." Cookie header string from the local Chrome
    store, filtered by `domains`; returns "" on any failure.

    BUGFIX: the default for `domains` was a mutable list literal; None is
    behaviorally equivalent (existInArray treats both as "match everything")
    and avoids the shared-mutable-default pitfall.
    """
    try:
        jar = ChromeCookieJar()
        jar.load()
        cookieValue = ''
        for item in jar.cookies:
            if existInArray(domains, item, isExact):
                cookieValue += item['name'] + '=' + item['value'] + '; '
        # Drop the trailing "; ".
        return cookieValue[:-2]
    except Exception as e:
        print("fetch_chrome_cookie", e)
        return ""


if __name__ == '__main__':
    coo = fetch_chrome_cookie([{"domain": ".jd.com"}], False)
    print(coo)
    session = requests.Session()
    cookie = coo
    headers = {'Content-Type': 'application/x-www-form-urlencoded',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36',
               'Host': 'jdfw.jd.com', 'Origin': 'http://jdfw.jd.com', 'Accept-Encoding': 'gzip, deflate',
               'Cookie': cookie, 'Accept-Language': 'zh-CN,zh;q=0.9', 'Connection': 'keep-alive',
               'Accept': 'application/json, text/javascript, */*; q=0.01',
               "X-Requested-With": "XMLHttpRequest",
               'Referer': 'http://jdfw.jd.com/receipt/receiptDashboardIndex?homePageDistinguish=notAppointed&serviceType=0'}
    data = {
        "esSwitch": "1", "subCompanyId": "10", "wareInfoId": "lw_10_334%%603_2", "outletsId": "0755860394",
        "sortKind": "4", "page": "1", "rows": "20", "sort": "returnTime", "order": "desc",
        "serviceType": "0", "fastDealNum": "5"
    }
    result = ""
    for item in data:
        result += item + "=" + data[item] + "&"
    result = result + "freeinstall=&startStatus=&endStatus=&timeout=&todayOtherReservationConditionName=&productBrand=&productType1=&productType2=&productType3=&orderId=&bizOrderId=&ordernoGroup=&customerName=&customerPhone=&serviceStreet=&wareId=&productName=&orderStatus=&orderStatusGroup=&createOrderTimeBegin=&createOrderTimeEnd=&reservationDateBegin=&reservationDateEnd=&firstReservationTimeBegin=&firstReservationTimeEnd=&changedReservationDateBegin=&changedReservationDateEnd=&feedbackStatus=&orderOrderStatus=&expectAtHomeDateBegin=&expectAtHomeDateEnd=&atHomeFinishDateBegin=&atHomeFinishDateEnd=&deliveryDateStart=&deliveryDateEnd=&homePageDistinguish=&fastDealNumByColor=&reservationStatus=&reportLessFlag=&superExperienceStore=&sourceOrderIdGroup=&sellerId=&sellerName=&eclpBusinessNo=&isFast="
    print(result)
    params = {}
    datas = result.split("&")
    for data in datas:
        content = data.split("=")
        if len(content) > 1:
            params[content[0]] = content[1]
    response = session.post("http://jdfw.jd.com/receipt/query.json", headers=headers, data=params)
    print(response.text)
/huadi_zb.py
# -*- coding: utf-8 -*- import requests import json from bs4 import BeautifulSoup import re from datetime import date, timedelta, datetime from Util import Util class HDScrap(Util): def __init__(self, username='01007544', pwd='160324', baseurl="http://cc.vatti.com.cn:8180", adminid='3', bjdomain='http://yxgtest.bangjia.me', companyid='9'): self.session = requests.Session() self.username = username self.passwd = pwd self.baseurl = baseurl self.codeFaultTimes = 0 self.loginFaultTimes = 0 self.adminid = adminid self.bjdomain = bjdomain self.datasuccess = {'code': 1, 'msg': '抓单成功', 'element': ''} self.datafail = {'code': 0, 'msg': '登录失败,请检查账号密码是否正确'} self.isSucess = False self.companyid = companyid self.mainurl = None self.headers = {'Content-type': 'text/html', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,pt;q=0.6', 'Connection': 'keep-alive', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,' '*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Host': "cc.vatti.com.cn:8180", 'Origin': baseurl, # 'User-Agent': agent, 'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) " "Chrome/79.0.3945.88 Safari/537.36"} def get_lsdata(self, element): data = element["lsdata"] data = data.replace(r"true", '1') data = data.replace(r"false", '0') return eval(data)[2] def get_value(self, element): return element["value"] def loginHd(self): loginurl = self.baseurl + '/sap/bc/bsp/sap/crm_ui_start/default.htm?sap-client=800&sap-language=ZH' print("url=" + loginurl + ",passwd=" + self.passwd) self.headers['Referer'] = loginurl loginRes = self.session.get(loginurl, headers=self.headers) loginRes.encoding = 'utf-8' bsObj = BeautifulSoup(loginRes.text, features="lxml") # print("=========================") processname = self.get_value(bsObj.find("input", {"name": "sap-system-login"})) sap_client = self.get_value(bsObj.find("input", {"id": "sap-client"})) loginxsrf = 
bsObj.find("input", {"name": "sap-login-XSRF"})["value"] params = {"FOCUS_ID": self.get_value(bsObj.find("input", {"id": "FOCUS_ID"})), "sap-system-login-oninputprocessing": processname, "sap-system-login": processname, "sap-login-XSRF": loginxsrf, "sysid": self.get_lsdata(bsObj.find("input", {"id": "sysid"})), "sap-client": sap_client, "sap-user": self.username, "sap-password": self.passwd, "SAPEVENTQUEUE": "Form_Submit~E002Id~E004SL__FORM~E003~E002ClientAction~E004submit~E005ActionUrl~E004" "~E005ResponseData~E004full~E005PrepareScript~E004~E003~E002~E003", "sap-language": self.get_value(bsObj.find("input", {"id": "sap-language"})), "sap-language-dropdown": self.get_value(bsObj.find("input", {"id": "sap-language-dropdown"}))} self.headers['Content-type'] = "application/x-www-form-urlencoded" checkRes = self.session.post(loginurl, data=params, headers=self.headers) self.selectrole() return self.checkstatus(checkRes) def checkstatus(self, response, callback=None): bsObj = self.getsoup(response) nextbtn = bsObj.find_all("a", {"id": "SESSION_QUERY_CONTINUE_BUTTON"}) logonbtn = bsObj.find_all("a", {"id": "LOGON_BUTTON"}) # 如果账号密码错误 或者其他问题,直接返回 if response.status_code != 200: return self.datafail # 如果有其他账户在登陆,点击继续 elif nextbtn: return self.continuelogon() elif logonbtn: return self.datafail if callback: return callback(bsObj) return self.datasuccess def continuelogon(self, callback=None): """ 点击继续,踢掉其他用户继续当前会话 """ print("有其他账户登陆,点击继续") params = {"FOCUS_ID": "SESSION_QUERY_CONTINUE_BUTTON", "sap-system-login-oninputprocessing": "onSessionQuery", "sap-system-login": "onSessionQuery", "sap-client": '800', "SAPEVENTQUEUE": "Form_Submit~E002Id~E004SL__FORM~E003~E002ClientAction~E004submit~E005ActionUrl~E004" "~E005ResponseData~E004full~E005PrepareScript~E004~E003~E002~E003", "sap-language": 'ZH', "delete-session-cb": 'X', "delete_session": 'X' } self.headers['Content-type'] = "application/x-www-form-urlencoded" url = self.baseurl + '/sap/bc/bsp/sap/crm_ui_start/default.htm' 
checkRes = self.session.post(url, data=params, headers=self.headers) # print(checkRes.status_code) if checkRes.status_code != 200: return self.datafail result = self.selectrole() if callback: return callback() return result def selectrole(self): # print('=========================选择角色') url = self.baseurl + "/sap/bc/bsp/sap/crm_ui_frame/main.htm?sap-client=800&sap-language=ZH&sap-domainRelax" \ "=min&saprole=ZIC_AGENT_08&sapouid=50000265&sapoutype=S" roleRes = self.session.get(url, headers=self.headers) if roleRes.status_code != 200: return self.datafail return self.datasuccess def getsoup(self, response): # print(response.status_code) response.encoding = 'utf-8' return BeautifulSoup(response.text, features="lxml") def transfer_order(self, statuscode=None): # print('=========================loadFrame1 加载左边的动作栏') url = self.mainurl if not url or len(url) <= 1: url = self.baseurl + "/sap/bc/bsp/sap/crm_ui_frame/BSPWDApplication.do?sap-client=800&sap-language=ZH&sap" \ "-domainrelax=min&saprole=ZIC_AGENT_08&sapouid=50000265&sapoutype=S" self.headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9' del self.headers['Content-type'] actionRes = self.session.get(url, headers=self.headers) # print(actionRes.text) result = self.checkstatus(actionRes) if result['code'] == 1: try: bsObj = self.getsoup(actionRes) if not bsObj: return self.datafail sercureid = self.get_value(bsObj.find("input", {"id": "wcf-secure-id"})) cb_flash = self.get_value(bsObj.find("input", {"id": "callbackFlashIslands"})) cb_light = self.get_value(bsObj.find("input", {"id": "callbackSilverlightIslands"})) data = {"data": json.dumps(self.loadallsearch(sercureid, cb_flash, cb_light, statuscode))} if statuscode: data['vatti_type'] = 1 # print("transfer_order:", data) result = requests.post(self.bjdomain + "/Api/Climborder/addorder", data=data) # print(result) except Exception as e: print("transfer_order exception", e) 
return self.datafail return self.datasuccess else: return self.datafail def loadsearch(self, sercureid, cb_flash, cb_light): # print('=========================loadsearch 加载工单查询初始页面') params = {"callbackFlashIslands": cb_flash, "callbackSilverlightIslands": cb_light, "htmlbevt_frm": "myFormId", "htmlbevt_cnt": "0", "onInputProcessing": "htmlb", "htmlbevt_ty": "thtmlb:link:click:0", "htmlbevt_id": "ZSRV-02-SR", "htmlbevt_oid": "C6_W29_V30_ZSRV-01-SR", "thtmlbKeyboardFocusId": "C6_W29_V30_ZSRV-01-SR", "sap-ajaxtarget": "C1_W1_V2_C6_W29_V30_MainNavigationLinks.do", "sap-ajax_dh_mode": "AUTO", "wcf-secure-id": sercureid, "PREFIX_ID": "C9_W36_V37_", "LTX_PREFIX_ID": "C1_W1_V2_", "sap-ajax_request": "X", "C4_W23_V24_V25_tv1_multiParameter": "0////0////0////0", "C8_W34_V35_RecentObjects_isExpanded": "yes", "C4_W23_V24_V25_tv1_isCellerator": "TRUE", "C4_W23_V24_V25_tv1_isNavModeActivated": "TRUE", "C4_W23_V24_V25_tv1_filterApplied": "FALSE", "C4_W23_V24_V25_tv1_editMode": "NONE", "C4_W23_V24_V25_tv1_firstTimeRendering": "NO", "C9_W36_V37_POLLFREE_ALERTS": "{&#34;Alerts&#34;:[]}", "C4_W23_V24_V25_tv1_configHash": "827DEA574484325768AF0E54A8EB7CBF8083ED01", "C3_W18_V19_V21_searchcustomer_struct.reltyp": "BUR001", "C4_W23_V24_V25_tv1_bindingString": "//CUSTOMERS/Table", "C13_W47_V48_SearchMenuAnchor1": "UP" } sap = re.findall(re.compile(r'[(](.*?)[)]', re.S), params['callbackFlashIslands'])[0] url = self.baseurl + "/sap(%s)/bc/bsp/sap/crm_ui_frame/BSPWDApplication.do" % sap print("loadsearch url={}".format(url)) self.mainurl = url self.headers['Content-type'] = "application/x-www-form-urlencoded" self.headers['Referer'] = url # 该参数代表了是否异步加载,如果加了这个选项,会只能接受到建议的网页,导致解析出错,浪费2天时间 # self.headers['X-Requested-With'] = "XMLHttpRequest" self.headers['Accept'] = "*/*" roleRes = self.session.post(url, data=params, headers=self.headers) # print(roleRes.text) return self.getsoup(roleRes), params def loadallsearch(self, sercureid, cb_flash, cb_light, statuscode=None): soup, params = 
self.loadsearch(sercureid, cb_flash, cb_light) confighash = str(soup.find("input", {"id": "C17_W61_V62_V64_ResultTable_configHash"})["value"]) order = list(self.search(confighash, params, 0, statuscode=statuscode)) return order def search(self, confighash, _params, page, totalcount=100, pagecount=50, statuscode=None): # print('=========================loadsearch 搜索', '增值工单' if not statuscode else '安装工单') target = "C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_SearchViewSet.do" if page == 0 else "C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_C17_W61_V62_V64_advancedsrl.do" oid = "C17_W61_V62_Searchbtn" if page == 0 else "C17_W61_V62_V64_ResultTable" focusid = "C17_W61_V62_Searchbtn" if page == 0 else "C17_W61_V62_V64_ResultTable_pag_pg-%d" % page params = { "callbackFlashIslands": _params['callbackFlashIslands'], "callbackSilverlightIslands": _params['callbackSilverlightIslands'], "htmlbevt_frm": "myFormId", "htmlbevt_cnt": "0" if page == 0 else "1", "onInputProcessing": "htmlb", "htmlbevt_ty": "htmlb:button:click:0" if page == 0 else "thtmlb:tableView:navigate:null", "htmlbevt_id": "SEARCH_BTN" if page == 0 else "tvNavigator", "sap-ajax_dh_mode": "AUTO", "wcf-secure-id": _params['wcf-secure-id'], "PREFIX_ID": "C9_W36_V37_", "LTX_PREFIX_ID": "C1_W1_V2_", "sap-ajax_request": "X", "crmFrwScrollXPos": "0", "crmFrwScrollYPos": "267", "crmFrwOldScrollXPos": "0", "crmFrwOldScrollYPos": "267", "thtmlbScrollAreaWidth": "0", "thtmlbScrollAreaHeight": "0", "C13_W47_V48_SearchMenuAnchor1": "UP", 'htmlbevt_oid': oid, 'thtmlbKeyboardFocusId': focusid, 'sap-ajaxtarget': target, 'currentDate': datetime.now().year, 'C17_W61_V62_V64_ResultTable_configHash': confighash, 'C17_W61_V62_V64_ResultTable_multiParameter': "0////0////0////0", 'C17_W61_V62_V64_ResultTable_bindingString': "//BTQRSrvOrd/Table", 'C17_W61_V62_V64_ResultTable_sortValue': 'CREATED_AT#:#desc#!#', 'C17_W61_V62_V63_btqsrvord_max_hits': "9" if statuscode else "9", # 一次查询最大多少条 'C17_W61_V62_thtmlbShowSearchFields': "true", 
'C17_W61_V62_V64_ResultTable_isNavModeActivated': "TRUE", 'C17_W61_V62_V64_ResultTable_filterApplied': "FALSE", 'C17_W61_V62_V64_ResultTable_isCellerator': "TRUE", 'C17_W61_V62_V64_ResultTable_editMode': "NONE", 'C17_W61_V62_V64_ResultTable_visibleFirstRow': str(1 + page * 10), "C17_W61_V62_V63_btqsrvord_parameters[1].FIELD": "POSTING_DATE", "C17_W61_V62_V63_btqsrvord_parameters[1].OPERATOR": "GT" if not statuscode else "EQ", "C17_W61_V62_V63_btqsrvord_parameters[1].VALUE1": (date.today() - timedelta(days=1)).strftime("%Y.%m.%d"), "C17_W61_V62_V63_btqsrvord_parameters[1].VALUE2": "", "C17_W61_V62_V63_btqsrvord_parameters[2].FIELD": "ZZFLD000057", "C17_W61_V62_V63_btqsrvord_parameters[2].OPERATOR": "EQ", "C17_W61_V62_V63_btqsrvord_parameters[2].VALUE1": "", "C17_W61_V62_V63_btqsrvord_parameters[2].VALUE2": "", "C17_W61_V62_V63_btqsrvord_parameters[3].FIELD": "ZZFLD000063", "C17_W61_V62_V63_btqsrvord_parameters[3].OPERATOR": "EQ", "C17_W61_V62_V63_btqsrvord_parameters[3].VALUE1": "", "C17_W61_V62_V63_btqsrvord_parameters[3].VALUE2": "", "C17_W61_V62_V63_btqsrvord_parameters[4].FIELD": "ZZFLD00005P", "C17_W61_V62_V63_btqsrvord_parameters[4].OPERATOR": "EQ", "C17_W61_V62_V63_btqsrvord_parameters[4].VALUE1": "01" if not statuscode else "", # 工单来源是HD-华帝 "C17_W61_V62_V63_btqsrvord_parameters[4].VALUE2": "", "C17_W61_V62_V63_btqsrvord_parameters[5].FIELD": "OBJECT_ID", "C17_W61_V62_V63_btqsrvord_parameters[5].OPERATOR": "EQ", "C17_W61_V62_V63_btqsrvord_parameters[5].VALUE1": "", "C17_W61_V62_V63_btqsrvord_parameters[5].VALUE2": "", "C17_W61_V62_V63_btqsrvord_parameters[6].FIELD": "PROCESS_TYPE", "C17_W61_V62_V63_btqsrvord_parameters[6].OPERATOR": "EQ", "C17_W61_V62_V63_btqsrvord_parameters[6].VALUE1": "ZIC6" if not statuscode else "ZIC3", # 工单类型为 增值服务单 "C17_W61_V62_V63_btqsrvord_parameters[6].VALUE2": "", "C17_W61_V62_V63_btqsrvord_parameters[7].FIELD": "ZZFLD00003J", "C17_W61_V62_V63_btqsrvord_parameters[7].OPERATOR": "EQ", 
            "C17_W61_V62_V63_btqsrvord_parameters[7].VALUE1": "",
            "C17_W61_V62_V63_btqsrvord_parameters[7].VALUE2": "",
            # "C17_W61_V62_V63_btqsrvord_parameters[8].FIELD": "STATUS_COMMON",
            # "C17_W61_V62_V63_btqsrvord_parameters[8].OPERATOR": "EQ",
            # "C17_W61_V62_V63_btqsrvord_parameters[8].VALUE1": "M0002ZSIC0002",  # status: order submitted
            # "C17_W61_V62_V63_btqsrvord_parameters[8].VALUE2": "",
            # "C17_W61_V62_V63_btqsrvord_parameters[9].FIELD": "ZZFLD000062",
            # "C17_W61_V62_V63_btqsrvord_parameters[9].OPERATOR": "EQ",
            # "C17_W61_V62_V63_btqsrvord_parameters[9].VALUE1": "",
            # "C17_W61_V62_V63_btqsrvord_parameters[9].VALUE2": "",
            'C17_W61_V62_V64_ResultTable_firstTimeRendering': "NO",
            "C9_W36_V37_POLLFREE_ALERTS": "{&#34;Alerts&#34;:[]}",
            # First request reports rowCount 0; later pages echo the known total.
            "C17_W61_V62_V64_ResultTable_rowCount": "0" if page == 0 else str(totalcount)
        }
        # if statuscode:
        #     params["C17_W61_V62_V63_btqsrvord_parameters[8].FIELD"] = "STATUS_COMMON"
        #     params["C17_W61_V62_V63_btqsrvord_parameters[8].OPERATOR"] = "EQ"
        #     params["C17_W61_V62_V63_btqsrvord_parameters[8].VALUE1"] = statuscode
        #     params["C17_W61_V62_V63_btqsrvord_parameters[8].VALUE2"] = ""
        if page != 0:
            # Paging event value: "page:<next page no>,<first row no>,<page size>,<total>,P"
            params['htmlbevt_par1'] = "page:%d,%d,%d,%d,P" % (page + 1, 1 + page * pagecount, pagecount, totalcount)
        # Extract the SAP session token from the flash-islands callback URL: "...sap(<token>)...".
        sap = re.findall(re.compile(r'[(](.*?)[)]', re.S), params['callbackFlashIslands'])[0]
        url = self.baseurl + "/sap(%s)/bc/bsp/sap/crm_ui_frame/BSPWDApplication.do" % sap
        self.headers['Content-type'] = "application/x-www-form-urlencoded"
        self.headers['Referer'] = url
        print("page={},totalcount={},url={},headers={}".format(page, totalcount, url, self.headers))
        roleRes = self.session.post(url, data=params, headers=self.headers)
        bsObj = self.getsoup(roleRes)
        # if statuscode:
        #     print("search result={}".format(roleRes.text))
        resulttable = bsObj.find("table", {"id": "C17_W61_V62_V64_ResultTable_TableHeader"}).find("tbody")
        # The server reports the real total on every response; refresh it.
        totalcount = int(bsObj.find("input", {"id": "C17_W61_V62_V64_ResultTable_rowCount"})["value"])
        isall = (page + 1) * pagecount >= totalcount
        print("totalcount=%d"
              % totalcount + ",page=%d" % page + ",isallloaded=%d" % isall)
        if resulttable:
            yield from self.parseorderlist(resulttable.find_all("tr"), url, params, statuscode)
        if not isall:
            # Recurse into the next page until every result row has been visited.
            yield from self.search(confighash, _params, page + 1, totalcount, pagecount, statuscode)

    def parseorderlist(self, trlist, url, params, statuscode):
        """Walk the search-result rows; yield full order records for each parsable row.

        trlist: the <tr> elements of the result table.
        url/params: current CRM request context, forwarded to orderdetail.
        statuscode: optional CRM status filter; changes downstream parsing paths.
        """
        for tr in trlist:
            tablecolumns = tr.find_all("td")
            if tr and len(tablecolumns) > 2:
                data = self.parseorder(tablecolumns, statuscode)
                if data:
                    yield from self.orderdetail(data, url, params, statuscode)

    def parseorder(self, tablecolumns, statuscode=None):
        """Parse one result-table row into an order dict.

        Returns the dict, or None when the row cannot be parsed, or when a
        statuscode filter is active and self.isNew() rejects the order.
        """
        try:
            orderno_td = tablecolumns[1]
            name_td = tablecolumns[3]
            data = {}
            orderitem = orderno_td.find("a")
            # Column 3 holds "name / city-county" — split into the two halves.
            nameaddress = self.finda(name_td).split(" / ")
            if orderitem and orderitem.has_attr('id'):
                data['oid'] = orderitem["id"]  # element id of the order-no link in this list; needed on the next page
                data['pid'] = name_td.find("a")['id']  # element id of the customer-name link; needed on the next page
                data['factorynumber'] = self.finda(orderno_td)
                data['username'] = nameaddress[0]
                data['originname'] = self.findspan(tablecolumns[4])
                data['ordertime'] = self.findspan(tablecolumns[7]).replace(".", '-')  # "yyyy.mm.dd" -> "yyyy-mm-dd"
                data['companyid'] = self.companyid
                data['machinebrand'] = "华帝"
                data['orderstatus'] = "工单提交"
                data['adminid'] = self.adminid
                if len(nameaddress) > 1 and "-" in nameaddress[1]:
                    address = nameaddress[1].split("-")
                    if len(address) > 1:
                        data['city'] = address[0]
                        data['county'] = address[1]
                # print("parseorder data=")
                # print(data)
                if data['username']:
                    # Keep only the first token of the customer name.
                    data['username'] = data['username'].split(" ")[0]
                return data if not statuscode or self.isNew(data, self.bjdomain, self.adminid) else None
        except Exception as e:
            print("parseorder exception", e)
            return None

    def orderdetail(self, data, url, params, statuscode):
        """Open the order's detail page and enrich `data`; yields the final record.

        With a statuscode the order is a finished/visited one (different span ids,
        completion time fetched via getFinishTime); without, it is a fresh order.
        """
        # print('=========================orderdetail: fetch order detail')
        oid = data['oid']
        params['htmlbevt_ty'] = "thtmlb:link:click:0"
        params['htmlbevt_oid'] = oid
        params['thtmlbKeyboardFocusId'] = oid
        params['htmlbevt_id'] = "HEADEROV"
        params['htmlbevt_cnt'] = "0"
        # NOTE(review): only the year is sent as currentDate — confirm the CRM expects this.
        params['currentDate'] = datetime.now().year
        params['sap-ajaxtarget'] = "C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_C17_W61_V62_V64_advancedsrl.do"
        if 'htmlbevt_par1' in params:
            # Drop the paging event left over from search().
            del params['htmlbevt_par1']
        roleRes = self.session.post(url, data=params, headers=self.headers)
        bsObj = self.getsoup(roleRes)
        if statuscode:
            # print(roleRes.text)
            data['orderstatus'] = "服务完成" if statuscode == "M0010ZSIC0003" else "回访完成"
            data['machinetype'] = bsObj.find("span", {"id": "C19_W69_V72_V75_thtmlb_textView_28"}).text.strip()  # machine type
            data['buydate'] = bsObj.find("span", {"id": "C19_W69_V72_V75_btadminh_ext.zzfld00002y"}).text.strip()  # purchase date
            data['ordername'] = "安装"
            data['sn'] = bsObj.find("span", {"id": "C19_W69_V72_V75_btadminh_ext.zzfld00001r"}).text.strip()  # barcode
            data['version'] = self.getTableRow(bsObj, "C23_W85_V86_V88_Table_TableHeader",
                                               lambda row: self.finda(row[3]) + "|")  # product numbers, "|"-joined
            data['machine_dsc'] = self.getTableRow(bsObj, "C23_W85_V86_V88_Table_TableHeader",
                                                   lambda row: self.finda(row[6]) + "|")  # product descriptions, "|"-joined
            data = self.getFinishTime(data, url, params)
        else:
            user_tr = bsObj.find("div", {"id": "C19_W69_V72_0003Content"}).find("tbody").find("tr")
            data['mobile'] = user_tr.find('span', id=re.compile('partner_no')).text.strip()
            data['address'] = user_tr.find('span', id=re.compile('address_short')).text.strip()
            data['repairtime'] = bsObj.find("span", {"id": "C19_W69_V72_V74_btadminh_ext.zzfld00003j"}).text.strip()  # appointment time
            data['machinetype'] = bsObj.find("span", {"id": "C19_W69_V72_V74_thtmlb_textView_30"}).text.strip()  # machine type
            data['buydate'] = bsObj.find("span", {"id": "C19_W69_V72_V74_btadminh_ext.zzfld00002y"}).text.strip()  # purchase date
            data['ordername'] = bsObj.find("span", {"id": "C19_W69_V72_V74_thtmlb_textView_20"}).text.strip()  # value-added service item
        # The notes table lives under a different view id depending on the status filter.
        data['description'] = self.getTableRow(bsObj,
                                               "C23_W83_V84_V85_TextList_TableHeader" if not statuscode else "C24_W90_V91_V92_TextList_TableHeader",
                                               lambda row: self.findspan(row[0]) + ":" + self.finda(row[1]) + "\n")
        yield self.userdetail(data, url, params, statuscode)

    def getFinishTime(self, data, url, params):
        """Fetch the order's dates table and record the completion time.

        Posts a navigate event on the DatesTable view; the CRM completion
        date/time is stored into data['repairtime'] (used as the install date).
        """
        # print('=========================getFinishTime: fetch order completion time')
        param = {"callbackFlashIslands": params["callbackFlashIslands"],
                 "callbackSilverlightIslands": params["callbackSilverlightIslands"],
                 "wcf-secure-id": params["wcf-secure-id"],
                 "LTX_PREFIX_ID": params["LTX_PREFIX_ID"],
                 "PREFIX_ID": 'C9_W36_V37_',
                 "crmFrwScrollXPos": '0',
                 "crmFrwOldScrollXPos": '0',
                 "currentDate": params["currentDate"],
                 'htmlbevt_ty': "thtmlb:tableView:navigate:null",
                 'htmlbevt_oid': "C31_W114_V115_DatesTable",
                 'htmlbevt_frm': "myFormId",
                 'htmlbevt_id': "tvNavigator",
                 'htmlbevt_cnt': "1",
                 'htmlbevt_par1': "page:2,11,10,18,P",
                 'sap-ajaxtarget': "C1_W1_V2_C1_W1_V2_V3_C19_W69_V72_C31_W114_V115_Dates.do",
                 'sap-ajax_dh_mode': "AUTO",
                 'onInputProcessing': "htmlb",
                 'C13_W47_V48_SearchMenuAnchor1': "UP",
                 'C8_W34_V35_RecentObjects_isExpanded': "yes",
                 'C23_W85_V86_V88_Table_editMode': "NONE",
                 'C19_W69_V72_0001_displaymode': "X",
                 'C23_W85_V86_V87_itemobjecttype_itemobjecttype': "ALL",
                 'C23_W85_V86_V88_Table_isCellerator': "TRUE",
                 'C23_W85_V86_V88_Table_rowCount': "1",
                 'C23_W85_V86_V88_Table_visibleFirstRow': "1",
                 'C23_W85_V86_V88_Table_bindingString': "//BTAdminI/Table",
                 'C23_W85_V86_V88_Table_isNavModeActivated': "TRUE",
                 'C23_W85_V86_V88_Table_configHash': "9EEC78D4306657883F5C86BEFC0745B37DA819FE",
                 'C23_W85_V86_V88_Table_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0002_displaymode': "X",
                 'C24_W90_V91_V92_TextList_rowCount': "3",
                 'C24_W90_V91_V92_TextList_visibleFirstRow': "1",
                 'C24_W90_V91_V92_TextList_bindingString': "//Text/Table",
                 'C24_W90_V91_V92_TextList_isNavModeActivated': "TRUE",
                 'C24_W90_V91_V92_TextList_configHash': "0E513D2C7268EC204F42B18C06AFE9CDEC0335E5",
                 'C24_W90_V91_V92_TextList_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0003_displaymode': "X",
                 'C25_W94_V95_Table_isCellerator': "TRUE",
                 'C25_W94_V95_Table_rowCount': "0",
                 'C25_W94_V95_Table_visibleFirstRow': "1",
                 'C25_W94_V95_Table_bindingString': "//DocList/Table",
                 'C25_W94_V95_Table_isFrontendSelection': "TRUE",
                 'C25_W94_V95_Table_isNavModeActivated': "TRUE",
                 'C25_W94_V95_Table_configHash': "2B1898492BCC377ECF844081E0C8B91EEB805379",
                 'C25_W94_V95_Table_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0004_displaymode': "X",
                 'C19_W69_V72_0006_displaymode': "X",
                 'C27_W103_V104_ConfCellTable_isCellerator': "TRUE",
                 'C27_W103_V104_ConfCellTable_rowCount': "0",
                 'C27_W103_V104_ConfCellTable_visibleFirstRow': "1",
                 'C27_W103_V104_ConfCellTable_bindingString': "//TranceList/Table",
                 'C27_W103_V104_ConfCellTable_isNavModeActivated': "TRUE",
                 'C27_W103_V104_ConfCellTable_configHash': "7D633AD0A8F7098E6A03D3F0BBA3020EB7F11686",
                 'C27_W103_V104_ConfCellTable_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0007_displaymode': "X",
                 'C19_W69_V72_0008_displaymode': "X",
                 'C29_W108_V109_ConfCellTable_isCellerator': "TRUE",
                 'C29_W108_V109_ConfCellTable_rowCount': "0",
                 'C29_W108_V109_ConfCellTable_visibleFirstRow': "1",
                 'C29_W108_V109_ConfCellTable_bindingString': "//ZCall/Table",
                 'C29_W108_V109_ConfCellTable_isNavModeActivated': "TRUE",
                 'C29_W108_V109_ConfCellTable_configHash': "E24612518975848E7FAA1EF476EBF26F7D025301",
                 'C29_W108_V109_ConfCellTable_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0009_displaymode': "X",
                 'C30_W110_V111_TABLE_rowCount': "0",
                 'C30_W110_V111_TABLE_visibleFirstRow': "1",
                 'C30_W110_V111_TABLE_bindingString': "//ZTAB00011F/Table",
                 'C30_W110_V111_TABLE_isFrontendSelection': "TRUE",
                 'C30_W110_V111_TABLE_isNavModeActivated': "TRUE",
                 'C30_W110_V111_TABLE_configHash': "47B16290F9622C8097E999109F42C028F547915D",
                 'C30_W110_V111_TABLE_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0010_displaymode': "X",
                 'C31_W114_V115_DatesTable_isCellerator': "TRUE",
                 'C31_W114_V115_DatesTable_rowCount': "18",
                 'C31_W114_V115_DatesTable_visibleFirstRow': "11",
                 'C31_W114_V115_DatesTable_bindingString': "//BTDate/Table",
                 'C31_W114_V115_DatesTable_isNavModeActivated': "TRUE",
                 'C31_W114_V115_DatesTable_configHash': "F1047D2E37AE2DE80BA46A1E06588EDC4440CA8A",
                 'C31_W114_V115_DatesTable_multiParameter': "0////0////0////0",
                 'C19_W69_V72_0011_displaymode': "X",
                 'thtmlbOverviewControllerID': "C19_W69_V72",
                 'crmFrwScrollYPos': "891",
                 'crmFrwOldScrollYPos': "891",
                 'thtmlbKeyboardFocusId': "C31_W114_V115_DatesTable_pag_pg-1",
                 'sap-ajax_request': "X"}
        url = url + "?sap-client=800&sap-language=ZH&sap-domainrelax=min&saprole=ZIC_AGENT_08&sapouid=50000265&sapoutype=S"
        # print("self.headers=", self.headers, ",url=", url)
        userRes = self.session.post(url, data=param, headers=self.headers)
        # print("param=", param)
        # print("getFinishTime result:", userRes.text)
        bsObj = self.getsoup(userRes)
        try:
            data['repairtime'] = self.getTableRow(bsObj, "C31_W114_V115_DatesTable_TableHeader",
                                                  lambda r: self.findspan(r[1]).replace(".", '-') + " " + self.findspan(
                                                      r[2]),
                                                  row_no=-4, truncate=False)  # CRM completion date used as the installation date
        except Exception as e:
            print("getFinishTime exception", e)
        return data

    def userdetail2(self, data, url, params):
        """Open the customer detail from the order-detail page and fill contact fields.

        Alternate path to userdetail(); navigates via the partner-table edit
        button, reads phone/province/city/county/street, then backs out to the
        order detail and the order list.
        """
        # print('=========================userdetail2: open customer detail from the order detail page')
        data['pid'] = 'C24_W88_V89_btpartner_table[1].thtmlb_oca.EDIT'  # hard-coded element id — fetch from the page instead?
        oid = data['oid']
        pid = data['pid']
        del data['pid']
        del data['oid']
        param = params.copy()
        param['htmlbevt_ty'] = "thtmlb:image:click:null::CL_THTMLB_TABLE_VIEW::EDIT.1"
        param['htmlbevt_oid'] = pid
        param['thtmlbKeyboardFocusId'] = pid
        param['htmlbevt_id'] = "ONE_CLICK_ACTION"
        param['htmlbevt_cnt'] = "0"
        param['sap-ajaxtarget'] = "C1_W1_V2_C1_W1_V2_V3_C24_W84_V87_C29_W103_V104_Partner.do"
        param['C23_W85_V86_V88_Table_configHash'] = "9EEC78D4306657883F5C86BEFC0745B37DA819FE"
        param['C24_W90_V91_V92_TextList_configHash'] = "0E513D2C7268EC204F42B18C06AFE9CDEC0335E5"
        param['C24_W90_V91_V92_TextList_multiParameter'] = "0////0////0////0"
        param['C24_W90_V91_V92_TextList_bindingString'] = "//Text/Table"
        userRes = self.session.post(url, data=param, headers=self.headers)
        bsObj = self.getsoup(userRes)
        data['mobile'] = str(bsObj.find("input", {"id": "C30_W123_V124_commdata_telephonetel"})["value"])
        data['province'] = str(bsObj.find("input", {"id": "C30_W119_V120_postaldata_region_text"})["value"])
        data['city'] = str(bsObj.find("input", {"id": "C30_W119_V120_postaldata_city"})["value"])
        data['county'] = str(bsObj.find("input", {"id": "C30_W119_V120_postaldata_district"})["value"])
        data['address'] = str(bsObj.find("input", {"id": "C30_W119_V120_postaldata_street"})["value"])  # customer street address
        data = self.clearAddress(data)
        # print('=========================orderdetail2: final data')
        # print(data)
        self.back2order(pid, url, params)
        self.back2orderlist(oid, url, params)
        return data

    def filterstr(self, address, filterstr):
        """Strip a single leading occurrence of `filterstr` from `address`."""
        if address and filterstr and filterstr in address and address.startswith(filterstr):
            return address.replace(filterstr, '', 1)
        else:
            return address

    def userdetail(self, data, url, params, statuscode):
        """Open the customer detail from the order list and fill contact fields.

        Navigates back to the order list first, clicks the sold-to-party link,
        scrapes phone/city/street, then returns to the order list.
        """
        # print('=========================userdetail: open customer detail from the order list')
        oid = data['oid']
        self.back2orderlist(oid, url, params)  # back to the order list
        del data['oid']
        pid = data['pid']
        del data['pid']
        params['htmlbevt_ty'] = "thtmlb:link:click:0"
        params['htmlbevt_oid'] = pid
        params['thtmlbKeyboardFocusId'] = pid
        params['htmlbevt_id'] = "SOLD_TO_PARTY"
        params['htmlbevt_cnt'] = "0"
        params['sap-ajaxtarget'] = "C1_W1_V2_C1_W1_V2_V3_C17_W61_V62_C17_W61_V62_V64_advancedsrl.do"
        params['C17_W61_V62_V64_ResultTable_configHash'] = "F698293684A5C954932EE6CB006466A1645E5EF5"
        userRes = self.session.post(url, data=params, headers=self.headers)
        bsObj = self.getsoup(userRes)
        # C30_W119_V120_postaldata_street
        data['mobile'] = bsObj.find('span', id=re.compile('.TELEPHONE')).text.strip()  # customer phone
        data['city'] = bsObj.find('input', id=re.compile('.city'))["value"]  # customer city
        data['address'] = str(bsObj.find('input', id=re.compile('.street'))["value"])  # customer street address
        data = self.clearAddress(data)
        # print('=========================orderdetail: final data')
        # print(data)
        self.back2orderlist(pid, url, params)
        return data

    def back2order(self, id, url, params):
        """Navigate back to the order-detail page (fires the "done" button event)."""
        params_new = params.copy()
        params_new['htmlbevt_ty'] = "htmlb:button:click:0"
        params_new['htmlbevt_oid'] = "C24_W111_V112_V113_thtmlb_button_1"
        params_new['thtmlbKeyboardFocusId'] = "C24_W111_V112_V113_thtmlb_button_1"
        params_new['htmlbevt_id'] = "done"
        params_new['htmlbevt_cnt'] = "0"
        params_new['sap-ajaxtarget'] = "C1_W1_V2_C1_W1_V2_V3_C24_W111_V112_C24_W111_V112_V113_PartnerEFHeader.do"
        params_new['sap-ajax_dh_mode'] = "AUTO"
        params_new['C13_W47_V48_SearchMenuAnchor1'] = "UP"
        params_new['C8_W34_V35_RecentObjects_isExpanded'] = "yes"
        self.session.post(url, data=params_new, headers=self.headers)

    def back2orderlist(self, id, url, params):
        """Navigate back to the order list (fires the breadcrumb "back" link event).

        NOTE(review): unlike back2order this ALIASES `params` (no .copy()), so
        the caller's dict is mutated — confirm downstream requests rely on that.
        """
        params_new = params
        params_new['htmlbevt_ty'] = "htmlb:link:click:null"
        params_new['htmlbevt_oid'] = "C1_W1_V2_V3_V55_back"
        params_new['thtmlbKeyboardFocusId'] = id
        params_new['htmlbevt_id'] = "back"
        params_new['htmlbevt_cnt'] = "1"
        params_new['htmlbevt_par1'] = "#"
        params_new['C23_W83_V84_V85_TextList_bindingString'] = "//Text/Table"
        params_new['C24_W88_V89_Table_selectedRows'] = "1"
        params_new['C24_W88_V89_Table_rowCount'] = "1"
        params_new['thtmlbOverviewControllerID'] = "C19_W69_V72"
        params_new['C28_W104_V105_Table_bindingString'] = "//DocList/Table"
        params_new['C28_W104_V105_Table_configHash'] = "2B1898492BCC377ECF844081E0C8B91EEB805379"
        params_new['C28_W104_V105_Table_multiParameter'] = "0////0////0////0"
        params_new['C19_W69_V72_0006_displaymode'] = "X"
        # NOTE(review): multiParameter/configHash values below look swapped
        # (hash assigned to multiParameter and vice versa) — verify against a
        # captured request before touching.
        params_new['C27_W101_V102_ConfCellTable_multiParameter'] = "7D633AD0A8F7098E6A03D3F0BBA3020EB7F11686"
        params_new['C27_W101_V102_ConfCellTable_configHash'] = "0////0////0////0"
        params_new['C24_W88_V89_Table_allRowSelected'] = "FALSE"
        params_new['C25_W92_V93_V95_Table_bindingString'] = "//BTAdminI/Table"
        params_new['sap-ajaxtarget'] = "C1_W1_V2_C1_W1_V2_V3_C1_W1_V2_V3_V55_BreadCrumbView.do"
        self.session.post(url, data=params_new, headers=self.headers)


if __name__ == '__main__':
    hdscrap = HDScrap('01007544', pwd='160324', adminid='24', bjdomain='http://gsn.bangjia.me')
    res = hdscrap.loginHd()
    # grap_res = hdscrap.transfer_order()
    # print(grap_res)
    grap_res = hdscrap.transfer_order(statuscode='M0010ZSIC0003')
    print(grap_res)
    # grap_res = hdscrap.transfer_order(statuscode='M0013ZSIC0004')
    # print(grap_res)
/login.py
import datetime
import json
import os
import sys
import time

import wx
import wx.adv
import wx.lib.mixins.inspection
from apscheduler.triggers import interval
from CDKUtil import CDKUtil
from apscheduler.schedulers.background import BackgroundScheduler

# NOTE(review): this module-level scheduler is never used; MyApp creates its own.
scheduler = BackgroundScheduler()

AppTitle = "CDK抓单"
VERSION = 0.1


def refresh_order(frame):
    """Background-job callback: sync Haier orders and update UI counters.

    Logs the app out (which notifies the user) when the sync reports failure —
    typically an expired session token; otherwise bumps the sync counter and
    the last-sync timestamp.
    """
    print("refresh_order frame={}".format(frame))
    success = wx.GetApp().cdkutil.loadHaierOrder()
    if not success:
        wx.GetApp().logout(frame)
    else:
        wx.GetApp().addCount()
        wx.GetApp().setLast()


class MainFrame(wx.Frame):
    """Window shown while logged in: live counters plus the periodic sync job."""

    def __init__(self, userinfo):
        wx.Frame.__init__(self, parent=None, title='CDK抓单中...')
        self.loginTime = wx.GetApp().GetLoginTime()
        self.userinfo = userinfo
        self.makeStatusBar()
        self.initText()
        self.OnTimer(None)  # paint the labels once before the first tick
        self.timer = wx.Timer(self)
        self.timer.Start(3000)  # refresh the labels every 3 s
        self.Bind(wx.EVT_TIMER, self.OnTimer)
        wx.GetApp().startJob(self)  # start the periodic order sync

    def initText(self):
        """Create the three status labels (login duration / sync count / last sync)."""
        textSizer = wx.BoxSizer(wx.VERTICAL)
        # BUG FIX: original used "登录时长 %s".format(...) — "%s" is a printf
        # placeholder, so str.format left it verbatim in the label. Use "{}"
        # like OnTimer() already does.
        self.main_txt = wx.StaticText(self, -1, "登录时长 {}".format(MyApp.getCurrentDateTime() - self.loginTime),
                                      style=wx.ALIGN_CENTER)
        self.count_txt = wx.StaticText(self, -1, "同步次数:{}".format(wx.GetApp().getCount()), style=wx.ALIGN_CENTER)
        self.last_txt = wx.StaticText(self, -1, "最近更新时间:{}".format(wx.GetApp().getLast()), style=wx.ALIGN_CENTER)
        # center.SetForegroundColour('white')
        # center.SetBackgroundColour('black')
        textSizer.Add(self.main_txt, 0, wx.EXPAND, 10)
        textSizer.Add(self.count_txt, 0, wx.EXPAND, 10)
        textSizer.Add(self.last_txt, 0, wx.EXPAND, 10)
        self.SetSizer(textSizer)
        textSizer.Fit(self)

    def OnTimer(self, event):
        """Timer tick: refresh the clock in the status bar and the three labels."""
        t = MyApp.getCurrentDateTime()
        sbTime = "当前时间 {}".format(t.strftime("%Y-%m-%d %H:%M:%S"))
        self.myStatusBar.SetStatusText(sbTime, 0)
        self.main_txt.SetLabel("登录时长 {}".format(t - self.loginTime))
        self.count_txt.SetLabel("同步次数:{}".format(wx.GetApp().getCount()))
        self.last_txt.SetLabel("最近更新时间:{}".format(wx.GetApp().getLast()))
        self.Layout()

    def makeStatusBar(self):
        """Two-field status bar: current time (left), branding (right)."""
        self.myStatusBar = self.CreateStatusBar(1)
        self.myStatusBar.SetFieldsCount(2)
        self.myStatusBar.SetStatusWidths([-8, -4])
        self.myStatusBar.SetStatusText("", 0)
        self.myStatusBar.SetStatusText("bangjia.me.", 1)


class LoginFrame(wx.Frame):
    """Login window: account / password / captcha, with saved credentials prefilled."""

    def __init__(self):
        wx.Frame.__init__(self, parent=None, title=AppTitle)
        # panel = wx.Panel(self)
        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        userInfo = wx.GetApp().getUserInfo()
        if userInfo and 'username' in userInfo:
            default_name = userInfo['username']
        else:
            default_name = "66004185"
        if userInfo and 'passwd' in userInfo:
            default_pwd = userInfo['passwd']
        else:
            default_pwd = "Dw147259"
        self.txt_username = wx.TextCtrl(self, value=default_name)
        self.add_widgets("账号", self.txt_username)
        self.txt_password = wx.TextCtrl(self, value=default_pwd, style=wx.TE_PASSWORD)
        self.add_widgets("密码", self.txt_password)
        self.txt_code = wx.TextCtrl(self, value="")
        # Captcha image in the third row; clicking it loads a fresh captcha.
        # image = wx.Image(os.path.join(wx.GetApp().resource_path(''), "bitmaps",'item_empty.png'),
        #                  wx.BITMAP_TYPE_PNG).Rescale(80, 25).ConvertToBitmap()
        self.img_code = wx.StaticBitmap(self, -1)
        self.img_code.Bind(wx.EVT_LEFT_DOWN, self.loadCodeImg)
        self.add_widgets("验证码", self.txt_code).Add(self.img_code, 0, wx.ALL, 5)
        # self.title = wx.TextCtrl(self, value="")
        # self.add_widgets("验证码", self.title)
        btn_sizer = wx.BoxSizer()
        save_btn = wx.Button(self, label="登录")
        save_btn.Bind(wx.EVT_BUTTON, self.on_save)
        exit_btn = wx.Button(self, label="退出")
        exit_btn.Bind(wx.EVT_BUTTON, self.on_exit)
        btn_sizer.Add(save_btn, 0, wx.ALL, 5)
        btn_sizer.Add(exit_btn, 0, wx.ALL, 5)
        # btn_sizer.Add(wx.Button(self, id=wx.ID_CANCEL), 0, wx.ALL, 5)
        self.main_sizer.Add(btn_sizer, 0, wx.CENTER)
        self.SetSizer(self.main_sizer)
        self.loadCodeImg()
        self.Show()
        self.main_window = None
        # NOTE(review): exit_btn was also bound to on_exit above — double binding.
        self.Bind(wx.EVT_BUTTON, self.OnExit, exit_btn)
        self.Bind(wx.EVT_CLOSE, self.OnExit)

    def add_widgets(self, label_text, text_ctrl):
        """Append a "label: control" row to the main sizer; return the row sizer."""
        row_sizer = wx.BoxSizer(wx.HORIZONTAL)
        label = wx.StaticText(self, label=label_text, size=(50, -1))
        row_sizer.Add(label, 0, wx.ALL, 5)
        row_sizer.Add(text_ctrl, 1, wx.ALL | wx.EXPAND, 5)
        self.main_sizer.Add(row_sizer, 0, wx.EXPAND)
        return row_sizer

    def loadCodeImg(self, event=None):
        """Fetch a new captcha image and display it next to the captcha field."""
        # response = requests.get(url)
        # img = Image.open(BytesIO(response.content))
        img = wx.GetApp().cdkutil.generateCode()
        # image = wx.Image(img.size[0], img.size[1])
        image = wx.Image(img.size[0], img.size[1])
        image.SetData(img.convert("RGB").tobytes())
        self.img_code.SetBitmap(image.Rescale(80, 25).ConvertToBitmap())

    def on_save(self, event):
        """Attempt login; on success open MainFrame, otherwise clear the saved token."""
        print("登录")
        # Log in; on success the credentials are persisted locally.
        username = self.txt_username.GetValue()
        passwd = self.txt_password.GetValue()
        code = self.txt_code.GetValue()
        wx.GetApp().cdkutil.username = username
        wx.GetApp().cdkutil.passwd = passwd
        success = wx.GetApp().cdkutil.checkCode(code, username, passwd)
        print("登录 success: {}".format(success))
        # todo write to file?
        if success:
            wx.GetApp().SetLoginTime()
            self.main_window = MainFrame(wx.GetApp().getUserInfo())
            self.main_window.SetSize(800, 527)
            self.main_window.Center()
            self.main_window.Show(True)
            self.Hide()
            self.main_window.Bind(wx.EVT_CLOSE, self.on_exit)
        else:
            wx.GetApp().cdkutil.token = ''
            userinfo = {"username": username, "passwd": passwd, "token": '', 'islogin': False, 'orderurl': ''}
            wx.GetApp().setUserInfo(userinfo)

    def on_exit(self, event):
        """Exit-button / main-window-close handler; delegates to OnExit."""
        print("exit")
        user = wx.GetApp().getUserInfo()
        # closed_window = event.EventObject
        # if closed_window == self.main_window:
        #     self.main_window = None
        #     self.Show()
        # elif closed_window == self:
        #     print('Carry out your code for when Main window closes')
        #     event.Skip()
        self.OnExit(event)

    def OnClose(self):
        """Show the confirm-exit dialog; returns the wx.MessageBox result."""
        ret = wx.MessageBox("确定要退出吗 ?", AppTitle,
                            wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE | wx.NO_DEFAULT)
        return ret

    def OnExit(self, event):
        # Ask for exit.
        print("OnExit")
        print(event)
        intChoice = self.OnClose()
        print(intChoice)
        if intChoice == 2:  # 2 == wx.YES
            # Disconnect from server.
            # self.con.OnCloseDb()
            # Stop the periodic sync job.
            wx.GetApp().stopJob()
            closed_window = event.EventObject
            if closed_window == self.main_window:
                self.main_window.Destroy()
                self.main_window = None
                # self.Show()
            # elif closed_window == self:
            #     print('Carry out your code for when Main window closes')
            #     event.Skip()
            userinfo = wx.GetApp().getUserInfo()
            userinfo['islogin'] = False
            wx.GetApp().setUserInfo(userinfo)
            self.Destroy()


class MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin):
    """Application object: persists user info, owns the scheduler and the frames."""

    def OnInit(self, redirect=False, filename=None, useBestVisual=False, clearSigInt=True):
        self.SetAppName("CDK抓单")
        self.InitInspection()
        self.installDir = os.path.split(os.path.abspath(sys.argv[0]))[0]
        # self.installDir = self.resource_path('')
        self.locale = wx.Locale(wx.LANGUAGE_CHINESE_SIMPLIFIED)
        self.loginTime = MyApp.getCurrentDateTime()
        path = os.path.join(self.installDir, "file")
        if not os.path.exists(path):
            os.makedirs(path)
        self.userfile = os.path.join(self.installDir, "file", "user.txt")
        self.apscheduler = BackgroundScheduler()
        self.cdkutil = CDKUtil()
        self.job = None
        self.loginFrame = None
        self.mainFrame = None
        self.count = 1
        self.lasttime = self.loginTime
        print("OnInit sys.argv[0]={}".format(sys.argv[0]))
        print("OnInit installDir={}".format(self.installDir))
        userinfo = self.getUserInfo()
        frame = None
        # Reuse a persisted session when it looks valid; otherwise show login.
        if userinfo and 'islogin' in userinfo and 'token' in userinfo:
            if userinfo['islogin'] and userinfo['token'] and len(userinfo['token']) > 5:
                self.cdkutil.token = userinfo['token']
                self.cdkutil.username = userinfo['username']
                self.cdkutil.passwd = userinfo['passwd']
                self.cdkutil.orderurl = userinfo['orderurl']
                self.mainFrame = MainFrame(userinfo)
                frame = self.mainFrame
        if not self.mainFrame:
            self.loginFrame = LoginFrame()
            frame = self.loginFrame
        frame.SetSize(800, 527)
        self.SetTopWindow(frame)
        frame.Center()
        frame.Show(True)
        return True

    def getUserInfo(self):
        """Load the persisted user dict from disk; None when no file exists."""
        if os.path.exists(self.userfile):
            with open(self.userfile, 'r') as f:
                userinfo = json.loads(f.read())
                return userinfo
        return None

    def setUserInfo(self, userinfo):
        """Persist the user dict to disk as JSON."""
        with open(self.userfile, 'w') as f:
            jsObj = json.dumps(userinfo)
            f.write(jsObj)

    @staticmethod
    def getCurrentDateTime():
        # NOTE(review): time.gmtime() yields UTC, not local time — confirm intended.
        return datetime.datetime.strptime(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
                                          "%Y-%m-%d %H:%M:%S")

    def SetLoginTime(self):
        self.loginTime = MyApp.getCurrentDateTime()
        # self.loginTime = time.localtime(time.time())

    def GetLoginTime(self):
        return self.loginTime

    def startJob(self, frame):
        """Start the background scheduler and the 50-second order-sync job."""
        if not self.apscheduler:
            self.apscheduler = BackgroundScheduler()
        self.apscheduler.start()
        if not self.job:
            trigger = interval.IntervalTrigger(seconds=5 * 10)
            self.job = self.apscheduler.add_job(lambda: refresh_order(frame), trigger=trigger,
                                                id='task_sync_every_5m', replace_existing=True)
            # self.job = self.apscheduler.add_job(func=refresh_order, trigger='interval', args=[frame],
            #                                     id='task_sync_every_5m', seconds=5 * 60)

    def stopJob(self):
        """Remove the sync job (the scheduler itself is kept running)."""
        # self.apscheduler.shutdown(wait=False)
        if self.job:
            self.job.remove()
            self.job = None

    def logout(self, frame):
        """Stop syncing, mark the stored session as logged out, notify the user."""
        print("logout")
        self.stopJob()
        userinfo = self.getUserInfo()
        userinfo['islogin'] = False
        self.setUserInfo(userinfo)
        wx.CallAfter(self.test, frame)

    def test(self, frame):
        """Show the session-expired message box and close the given frame."""
        print("test frame={}".format(frame))
        ret = wx.MessageBox("账号登录过期,请尝试重新登录", AppTitle, wx.OK | wx.ICON_INFORMATION)
        # ret = dialog.ShowModal()
        print(ret)
        if wx.OK == ret:
            print("ok pressed")
            frame.Destroy()
        # a = MyDialog(self.GetTopWindow(), "Dialog").ShowModal()
        # print(a)

    def addCount(self):
        self.count = self.count + 1

    def getCount(self):
        return self.count

    def setLast(self):
        self.lasttime = MyApp.getCurrentDateTime()

    def getLast(self):
        return self.lasttime

    def resource_path(self, relative_path):
        """Resolve a resource path, honoring the PyInstaller _MEIPASS sandbox."""
        if hasattr(sys, '_MEIPASS'):
            return os.path.join(sys._MEIPASS, relative_path)
        return os.path.join(os.path.abspath("."), relative_path)


class MyDialog(wx.Dialog):
    """Minimal OK dialog (currently unused except in commented-out code)."""

    def __init__(self, parent, title):
        super(MyDialog, self).__init__(parent, title=title, size=(250, 150))
        panel = wx.Panel(self)
        self.btn = wx.Button(panel, wx.ID_OK, label="ok", size=(50, 20), pos=(75, 50))
        self.btn.Bind(wx.EVT_BUTTON, self.on_Ok)

    def on_Ok(self, event):
        print("MyDialog ok button clicked!!!")
        self.Close()


if __name__ == '__main__':
    app = MyApp(redirect=False)
    app.MainLoop()
/master.py
import os import sys import time from datetime import datetime from datetime import timedelta import wx import wx.adv import wx.lib.mixins.inspection import wx.lib.mixins.listctrl as listmix import searchutil AppTitle = "报表管理" VERSION = 0.1 class MyListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin): def __init__(self, parent, id, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0): super(MyListCtrl, self).__init__(parent, id, pos, size, style) # ------------ listmix.ListCtrlAutoWidthMixin.__init__(self) # ------------ # Simplified init method. self.CreateColumns() self.SetProperties() # --------------------------------------------------------------------------- def CreateColumns(self): """ Create columns for listCtrl. """ self.InsertColumn(col=0, heading="ID", format=wx.LIST_FORMAT_LEFT) self.InsertColumn(col=1, heading="操作人", format=wx.LIST_FORMAT_LEFT) self.InsertColumn(col=2, heading="建单量", format=wx.LIST_FORMAT_LEFT) self.InsertColumn(col=3, heading="派单量", format=wx.LIST_FORMAT_LEFT) self.InsertColumn(col=4, heading="完工审核量", format=wx.LIST_FORMAT_LEFT) self.InsertColumn(col=5, heading="工资结算量", format=wx.LIST_FORMAT_LEFT) self.InsertColumn(col=6, heading="回访量", format=wx.LIST_FORMAT_LEFT) # ------------ # ASTUCE (Tip) - ListCtrlAutoWidthMixin : # pour diminuer le scintillement des colonnes # lors du redimensionnement de la mainframe, # regler la derniere colonne sur une largeur elevee. # Vous devez toujours visualiser l'ascenseur horizontal. # Set the width of the columns (x4). # Integer, wx.LIST_AUTOSIZE or wx.LIST_AUTOSIZE_USEHEADER. self.SetColumnWidth(col=0, width=50) self.SetColumnWidth(col=1, width=100) self.SetColumnWidth(col=2, width=60) self.SetColumnWidth(col=3, width=60) self.SetColumnWidth(col=4, width=110) self.SetColumnWidth(col=5, width=110) self.SetColumnWidth(col=6, width=60) def SetProperties(self): """ Set the list control properties (icon, font, size...). """ # Font size and style for listCtrl. 
fontSize = self.GetFont().GetPointSize() # Text attributes for columns title. # wx.Font(pointSize, family, style, weight, underline, faceName) if wx.Platform in ["__WXMAC__", "__WXGTK__"]: boldFont = wx.Font(fontSize - 1, wx.DEFAULT, wx.NORMAL, wx.NORMAL, False, "") self.SetForegroundColour("black") self.SetBackgroundColour("#ece9d8") # ecf3fd else: boldFont = wx.Font(fontSize, wx.DEFAULT, wx.NORMAL, wx.BOLD, False, "") self.SetForegroundColour("#808080") self.SetBackgroundColour("#ece9d8") # ecf3fd self.SetFont(boldFont) class MyFrame(wx.Frame): def __init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE | wx.CLIP_CHILDREN): super(MyFrame, self).__init__(parent=None, id=-1, title=title, style=style) # Returns application name. self.app_name = wx.GetApp().GetAppName() # Returns bitmaps folder. self.bitmaps_dir = wx.GetApp().GetBitmapsDir() # Returns icons folder. self.icons_dir = wx.GetApp().GetIconsDir() # Simplified init method. self.getAdminids() # 获取所有的网点 self.getMasters(0) # 获取网点下的所有师傅 self.SetProperties() # 设置界面的属性 self.MakeMenuBar() self.MakeStatusBar() self.CreateCtrls() self.BindEvents() self.DoLayout() self.OnTimer(None) self.timer = wx.Timer(self) self.timer.Start(3000) self.Bind(wx.EVT_TIMER, self.OnTimer) def getAdminids(self): pass def getMasters(self, adminid): pass def SetProperties(self): """ Set the frame properties (title, icon, size...). """ # Setting some frame properties. frameIcon = wx.Icon(os.path.join(self.icons_dir, "icon_wxWidgets.ico"), type=wx.BITMAP_TYPE_ICO) self.SetIcon(frameIcon) # Frame cursor. cursorHand = wx.Cursor(os.path.join(self.icons_dir, "hand.cur"), type=wx.BITMAP_TYPE_CUR) self.SetCursor(cursorHand) self.SetTitle("%s V%.1f" % (self.app_name, VERSION)) def MakeMenuBar(self): # Set an icon to the exit/about menu item. 
emptyImg = wx.Bitmap(os.path.join(self.bitmaps_dir, "item_empty.png"), type=wx.BITMAP_TYPE_PNG) exitImg = wx.Bitmap(os.path.join(self.bitmaps_dir, "item_exit.png"), type=wx.BITMAP_TYPE_PNG) helpImg = wx.Bitmap(os.path.join(self.bitmaps_dir, "item_about.png"), type=wx.BITMAP_TYPE_PNG) # menu. mnuFile = wx.Menu() mnuInfo = wx.Menu() # mnuFile. # Show how to put an icon in the menu item. menuItem1 = wx.MenuItem(mnuFile, -1, "布局查看\tCtrl+Alt+I", "布局查看工具 !") menuItem1.SetBitmap(emptyImg) mnuFile.Append(menuItem1) self.Bind(wx.EVT_MENU, self.OnOpenWidgetInspector, menuItem1) # Show how to put an icon in the menu item. menuItem2 = wx.MenuItem(mnuFile, wx.ID_EXIT, "退出\tCtrl+Q", "关闭 !") menuItem2.SetBitmap(exitImg) mnuFile.Append(menuItem2) self.Bind(wx.EVT_MENU, self.OnExit, menuItem2) # mnuInfo. # Show how to put an icon in the menu item. menuItem2 = wx.MenuItem(mnuInfo, wx.ID_ABOUT, "关于\tCtrl+A", "关于软件 !") menuItem2.SetBitmap(helpImg) mnuInfo.Append(menuItem2) self.Bind(wx.EVT_MENU, self.OnAbout, menuItem2) # menuBar. menubar = wx.MenuBar() # Add menu voices. menubar.Append(mnuFile, "文件") menubar.Append(mnuInfo, "关于") self.SetMenuBar(menubar) def MakeStatusBar(self): """ Create the status bar for my frame. """ # Statusbar. self.myStatusBar = self.CreateStatusBar(1) self.myStatusBar.SetFieldsCount(2) self.myStatusBar.SetStatusWidths([-8, -4]) self.myStatusBar.SetStatusText("", 0) self.myStatusBar.SetStatusText("bangjia.me.", 1) def getTodayDate(self, _date, _type): now = _date print(type(now)) zero_date = now - timedelta(hours=now.hour, minutes=now.minute, seconds=now.second, microseconds=now.microsecond) if _type == 0: return zero_date else: return zero_date + timedelta(hours=23, minutes=59, seconds=59) def CreateCtrls(self): """ Create some controls for my frame. """ # Font style for wx.StaticText. 
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) font.SetWeight(wx.BOLD) self.adminid = None self.masters = None self.userids = [] self.startdate = self.getTodayDate(datetime.now(), 0) self.enddate = self.getTodayDate(datetime.now(), 1) # Widgets. self.panel = wx.Panel(self) # self.stEmployees = wx.StaticText(self.panel, -1, "Employees list :") # self.stEmployees.SetForegroundColour("gray") # self.stEmployees.SetFont(font) # Image list. self.il = wx.ImageList(16, 16, True) # Set an icon for the first column. self.bmp = wx.Bitmap(os.path.join(self.bitmaps_dir, "employee.png"), type=wx.BITMAP_TYPE_PNG) # Add image to list. self.img_idx = self.il.Add(self.bmp) self.listCtrl = MyListCtrl(self.panel, -1, style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.BORDER_SUNKEN) # Assign the image list to it. self.listCtrl.SetImageList(self.il, wx.IMAGE_LIST_SMALL) # Retrieve data from the database. # self.employeeData = self.OnLoadData() # # # Populate the wx.ListCtrl. # for i in self.employeeData: # index = self.listCtrl.InsertItem(self.listCtrl.GetItemCount(), # ((str(i[0])))) # self.listCtrl.SetItem(index, 1, i[1]) # self.listCtrl.SetItem(index, 2, i[2]) # self.listCtrl.SetItem(index, 3, i[3]) # self.listCtrl.SetItem(index, 4, i[4]) # self.listCtrl.SetItemImage(self.listCtrl.GetItemCount() - 1, # self.img_idx) # # # Alternate the row colors of a ListCtrl. # # Mike Driscoll... thank you ! 
# if index % 2: # self.listCtrl.SetItemBackgroundColour(index, "#ffffff") # else: # self.listCtrl.SetItemBackgroundColour(index, "#ece9d8") # ecf3fd self.stSearch = wx.StaticText(self.panel, -1, 'Search "Surname" :') self.txSearch = wx.TextCtrl(self.panel, -1, "", size=(100, -1)) self.txSearch.SetToolTip("Search employee !") self.StaticSizer = wx.StaticBox(self.panel, -1, "Commands :") self.StaticSizer.SetForegroundColour("red") self.StaticSizer.SetFont(font) self.bntSearch = wx.Button(self.panel, -1, "搜索") self.bntSearch.SetToolTip("搜索角色的操作单量 !") self.bntClear = wx.Button(self.panel, -1, "&Clear") self.bntClear.SetToolTip("Clear the search text !") self.bntShowAll = wx.Button(self.panel, -1, "&All") self.bntShowAll.SetToolTip("Show all !") self.bntNew = wx.Button(self.panel, -1, "&Insert") self.bntNew.SetToolTip("Insert a new employee !") self.bntEdit = wx.Button(self.panel, -1, "&Update") self.bntEdit.SetToolTip("Update selected employee !") self.bntDelete = wx.Button(self.panel, -1, "&Delete") self.bntDelete.SetToolTip("Delete selected employee !") self.bntClose = wx.Button(self.panel, -1, "&Quit") self.bntClose.SetToolTip("Close !") # 创建操作区元素 self.wangdian_text = wx.StaticText(self.panel, -1, "选择网点:") self.master_text = wx.StaticText(self.panel, -1, "选择操作人:") self.time_text = wx.StaticText(self.panel, -1, "操作时间:") self.to_text = wx.StaticText(self.panel, -1, "到") # ch1 = wx.ComboBox(self.panel, -1, value='C', choices=searchutil.getAdminids(), style=wx.CB_SORT) self.ch_adminid = wx.Choice(self.panel, -1, choices=searchutil.getAdminids()) self.ch_master = wx.Choice(self.panel, -1, choices=['全部']) self.start = wx.adv.DatePickerCtrl(self.panel, -1, size=(120, 22), style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) self.end = wx.adv.DatePickerCtrl(self.panel, -1, size=(120, 22), style=wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY) self.ch_adminid.SetSelection(0) self.adminid = 24 self.getAllMasters() def BindEvents(self): """ 添加事件处理 """ # self.txSearch.Bind(wx.EVT_TEXT, 
self.OnUpperCaseText) # # # Intercept the click on the wx.ListCtrl. # self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self.listCtrl) # self.Bind(wx.EVT_LIST_COL_BEGIN_DRAG, self.OnColBeginDrag, self.listCtrl) # self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated, self.listCtrl) # self.Bind(wx.EVT_BUTTON, self.OnSearch, self.bntSearch) # self.Bind(wx.EVT_BUTTON, self.OnClear, self.bntClear) # self.Bind(wx.EVT_BUTTON, self.OnShowAll, self.bntShowAll) # self.Bind(wx.EVT_BUTTON, self.OnNew, self.bntNew) # self.Bind(wx.EVT_BUTTON, self.OnEdit, self.bntEdit) # self.Bind(wx.EVT_BUTTON, self.OnDelete, self.bntDelete) self.Bind(wx.EVT_BUTTON, self.OnExit, self.bntClose) self.Bind(wx.EVT_CLOSE, self.OnExit) self.Bind(wx.EVT_CHOICE, self.on_choice_a, self.ch_adminid) self.Bind(wx.EVT_CHOICE, self.on_choice_m, self.ch_master) self.Bind(wx.adv.EVT_DATE_CHANGED, self.OnDateChanged, self.start) self.Bind(wx.adv.EVT_DATE_CHANGED, self.OnDateChanged2, self.end) def DoLayout(self): # Sizer. actionSizer = wx.BoxSizer(wx.HORIZONTAL) textSizer = wx.BoxSizer(wx.VERTICAL) mainSizer = wx.BoxSizer(wx.HORIZONTAL) btnSizer = wx.StaticBoxSizer(self.StaticSizer, wx.VERTICAL) # Assign widgets to sizers. 
# actionSizer # actionSizer.Add(self.stEmployees, 0, wx.BOTTOM, 5) actionSizer.Add(self.wangdian_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5) actionSizer.Add(self.ch_adminid, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5) actionSizer.Add(self.master_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5) actionSizer.Add(self.ch_master, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5) actionSizer.Add(self.time_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5) actionSizer.Add(self.start, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE) actionSizer.Add(self.to_text, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE, border=5) actionSizer.Add(self.end, 0, flag=wx.LEFT | wx.RIGHT | wx.FIXED_MINSIZE) actionSizer.Add(self.bntSearch, 0, wx.ALL, border=5) # textSizer. textSizer.Add(actionSizer, 0, wx.BOTTOM, 10) textSizer.Add(self.listCtrl, 1, wx.EXPAND) # btnSizer. btnSizer.Add(self.stSearch) btnSizer.Add(self.txSearch) # btnSizer.Add((5, 5), -1) # btnSizer.Add(self.bntSearch, 0, wx.ALL, 5) btnSizer.Add((5, 5), -1) btnSizer.Add(self.bntClear, 0, wx.ALL, 5) btnSizer.Add((5, 5), -1) btnSizer.Add(self.bntShowAll, 0, wx.ALL, 5) btnSizer.Add((5, 5), -1) btnSizer.Add(self.bntNew, 0, wx.ALL, 5) btnSizer.Add((5, 5), -1) btnSizer.Add(self.bntEdit, 0, wx.ALL, 5) btnSizer.Add((5, 5), -1) btnSizer.Add(self.bntDelete, 0, wx.ALL, 5) btnSizer.Add((5, 5), -1) btnSizer.Add(self.bntClose, 0, wx.ALL, 5) # Assign to mainSizer the other sizers. mainSizer.Add(textSizer, 1, wx.ALL | wx.EXPAND, 10) mainSizer.Add(btnSizer, 0, wx.ALL, 10) mainSizer.Hide(btnSizer) # Assign to panel the mainSizer. self.panel.SetSizer(mainSizer) mainSizer.Fit(self) # mainSizer.SetSizeHints(self) def OnOpenWidgetInspector(self, event): """ Activate the widget inspection tool, giving it a widget to preselect in the tree. Use either the one under the cursor, if any, or this frame. 
""" from wx.lib.inspection import InspectionTool wnd = wx.FindWindowAtPointer() if not wnd: wnd = self InspectionTool().Show(wnd, True) def on_combobox(self, event): print("选择{0}".format(event.GetString())) def on_choice_a(self, event): self.adminid = event.GetString() print("选择网点id:{}".format(self.adminid)) self.ch_master.Clear() self.getAllMasters() def getAllMasters(self): self.masters = searchutil.getMasters(self.adminid) # masterStr = [] # self.ch_master.Client(0, None) for index, master in enumerate(self.masters): if index != 0: self.userids.append(str(master['userid'])) # masterStr.append(str(master['username'])) self.ch_master.Append(master['username'], master) # self.ch_master.SetItems(masterStr) # 可行 self.ch_master.SetSelection(0) def on_choice_m(self, event): print("选择操作人:{}".format(event.GetString())) print("选择到操作人的其他参数:{}".format(event.GetClientData())) def OnDateChanged(self, evt): print("OnDateChanged: %s\n" % evt.GetDate()) # self.log.write("OnDateChanged: %s\n" % evt.GetDate()) self.startdate = self.getTodayDate(wx.wxdate2pydate(evt.GetDate()), 0) print("OnDateChanged2 startdate: %s\n" % self.startdate) pass def OnDateChanged2(self, evt): print("OnDateChanged2: %s\n" % evt.GetDate()) # self.log.write("OnDateChanged2: %s\n" % evt.GetDate()) self.enddate = self.getTodayDate(wx.wxdate2pydate(evt.GetDate()), 1) print("OnDateChanged2 enddate: %s\n" % self.enddate) pass def OnSearch(self, event): print("OnSearch") # print(self.ch_adminid.GetSelection()) # 选中的索引 # itemObject = self.ch_adminid.GetClientData(self.ch_adminid.GetSelection()) if self.ch_master.GetSelection() == 0: # userid = self.ch_master.GetItems() # 获取到了所有的展示名称列表 userid = self.userids else: userid = self.ch_master.GetClientData(self.ch_master.GetSelection()) print("adminid={}, userid={}".format(self.adminid, userid)) print("startdate={}, enddate={}".format(self.startdate, self.enddate)) self.updateList(searchutil.getOperators(self.adminid, userid, self.startdate.strftime('%Y-%m-%d 
%H:%M:%S'), self.enddate.strftime('%Y-%m-%d %H:%M:%S'))) pass def updateList(self, datas): self.listCtrl.SetFocus() self.listCtrl.DeleteAllItems() print(datas) # Populate the wx.ListCtrl. for _index, _data in enumerate(datas): index = self.listCtrl.InsertItem(self.listCtrl.GetItemCount(),str(_index + 1)) if not _data['datas']: print() for items in _data['datas']: print(items) data = {} username = '' # 操作类别:1:建单 2:派单 3:审核 4:结算 5:回访 for item in items: username = item['username'] data[str(item['opertype'])] = item['total_count'] self.listCtrl.SetItem(index, 1, username) self.listCtrl.SetItem(index, 2, 0 if '1' not in data else data['1']) self.listCtrl.SetItem(index, 3, 0 if '2' not in data else data['2']) self.listCtrl.SetItem(index, 4, 0 if '3' not in data else data['3']) self.listCtrl.SetItem(index, 5, 0 if '4' not in data else data['4']) self.listCtrl.SetItem(index, 6, 0 if '5' not in data else data['5']) self.listCtrl.SetItemImage(self.listCtrl.GetItemCount() - 1, self.img_idx) # Alternate the row colors of a ListCtrl. # Mike Driscoll... thank you ! if index % 2: self.listCtrl.SetItemBackgroundColour(index, "#ffffff") else: self.listCtrl.SetItemBackgroundColour(index, "#ece9d8") # ecf3fd @ staticmethod def OnAbout(event): message = """wangdian.bangjia.me\n 帮家报表管理系统 使用wxPython开发.\n 当前版本 : %.1f""" % VERSION wx.MessageBox(message, AppTitle, wx.OK) def OnClose(self): ret = wx.MessageBox("确定要退出吗 ?", AppTitle, wx.YES_NO | wx.ICON_QUESTION | wx.CENTRE | wx.NO_DEFAULT) return ret def OnExit(self, event): # Ask for exit. intChoice = self.OnClose() if intChoice == 2: # Disconnect from server. 
# self.con.OnCloseDb() self.Destroy() def OnTimer(self, event): t = time.localtime(time.time()) sbTime = time.strftime("当前时间 %d/%m/%Y are %H:%M:%S", t) self.myStatusBar.SetStatusText(sbTime, 0) class MyApp(wx.App, wx.lib.mixins.inspection.InspectionMixin): def OnInit(self, redirect=False, filename=None, useBestVisual=False, clearSigInt=True): self.SetAppName("帮家报表系统") self.InitInspection() self.installDir = os.path.split(os.path.abspath(sys.argv[0]))[0] self.locale = wx.Locale(wx.LANGUAGE_CHINESE_SIMPLIFIED) print("OnInit sys.argv[0]={}".format(sys.argv[0])) print("OnInit installDir={}".format(self.installDir)) frame = MyFrame(None, -1, title="") frame.SetSize(800, 527) self.SetTopWindow(frame) frame.Center() frame.Show(True) return True def GetInstallDir(self): """ Returns the installation directory for my application. """ return self.installDir def GetIconsDir(self): """ Returns the icons directory for my application. """ icons_dir = os.path.join(self.installDir, "icons") return icons_dir def GetBitmapsDir(self): """ Returns the bitmaps directory for my application. """ bitmaps_dir = os.path.join(self.installDir, "bitmaps") return bitmaps_dir def main(): app = MyApp(redirect=False) app.MainLoop() if __name__ == "__main__": main()
/searchutil.py
import json

import requests

# Base endpoints for the reporting backend.
base_url = "http://114.55.168.6/"
search_api = base_url + "es-test/essearch.php"
oper_api = base_url + "es-test/oper-search.php"

# 操作类别 (operation types): 1: create order 2: dispatch 3: review
# 4: settlement 5: follow-up


def _post_search(api, params):
    """POST *params* (form-encoded) to *api* and return the parsed JSON body.

    Returns None when the request fails or the server answers with a
    non-200 status, so callers can fall back to their empty defaults.
    """
    response = requests.post(api, data=params)
    response.encoding = 'utf-8'
    if response and response.status_code == 200:
        return json.loads(response.text)
    return None


def getAdminids():
    """Return the outlet ids ('adminid') known to the search index, as strings.

    '24' is always prepended as the default outlet when the request
    succeeds; on failure an empty list is returned.
    """
    params = {
        'method': 'search',
        'index': 'yxgoper',
        'from': 0,
        'size': 30,
        'groupby': 'adminid',
        'keyword': '',
        'field_return': 'adminid',
    }
    adminids = []
    results = _post_search(search_api, params)
    if results is not None:
        adminids.append('24')
        for element in results['element']:
            adminids.append(str(element['adminid']))
    return adminids


def getMasters(adminid):
    """Return the operators of one outlet as {'userid', 'username'} dicts.

    A synthetic first entry {'userid': '', 'username': '全部'} ("all") is
    prepended for use as a choice-control default; on failure an empty
    list is returned.
    """
    params = {
        'method': 'search',
        'index': 'yxgoper',
        'from': 0,
        'size': 100,
        'groupby': 'username',
        'keyword': '',
        'field_return': ['username', 'userid'],
        'adminid': adminid,
    }
    masters = []
    results = _post_search(search_api, params)
    if results is not None:
        masters.append({'userid': '', 'username': '全部'})
        for element in results['element']:
            masters.append(element)
    return masters


def getOperators(adminid, userid, start, end):
    """Return per-operator operation counts for one outlet in a time range.

    *userid* may be a single id or a list of ids; *start*/*end* are
    'YYYY-MM-DD HH:MM:SS' strings. The 'opertime' filter encodes
    start <= opertime <= end ('egt'/'elt' with 'and'). Returns [] on
    failure.
    """
    params = {
        'method': 'search',
        'index': 'yxgoper',
        'from': 0,
        'size': 100,
        'groupby': 'opertype',
        'keyword': '',
        'opertime': json.dumps([['egt', start], ['elt', end], 'and']),
        'userids': json.dumps(userid),
        'field_return': json.dumps(['username', 'opertype']),
        'adminid': adminid,
    }
    opers = []
    results = _post_search(oper_api, params)
    if results is not None:
        for element in results['element']:
            opers.append(element)
    return opers
/test/http2.py
import asyncio import json import os import sys import httpx from hyper import HTTPConnection, HTTP20Connection # conn = HTTPConnection('http2bin.org:443') # conn.request('GET', '/get') # resp = conn.get_response() # # print(resp.read()) from hyper.tls import init_context from BaseUtil import BaseUtil agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36" headers = {'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': agent, 'Referer': "https://opn.jd.com/bill/search?billStatus=5", 'Upgrade-Insecure-Requests': '1', 'Host': "opn.jd.com", 'Origin': "https://opn.jd.com", 'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2', 'Accept': 'application/json, text/plain, */*'} data = {"sort": "billId", "order": "desc", "billStatuses": "5", "isEgBuy": "0", "outletsNo": "05928613279", "sortKind": "4", "page": "1", "rows": "10", "isAppliance": "1", } result = "" for item in data: result += item + "=" + data[item] + "&" result = result[:-1] # 修改路径 realpath = os.path.dirname(os.path.realpath(sys.argv[0])) print("realpath>>>>", realpath) cafile = os.path.join(realpath, "resource", 'pem', "certs.pem") print("cert_loc cafile>>>",cafile) conn = HTTP20Connection(host='opn.jd.com', port=443, ssl_context=init_context(cafile)) cookie = BaseUtil.getCookie([{"domain": ".jd.com"}]) headers['Cookie'] = cookie headers[':authority'] = 'opn.jd.com' headers[':method'] = 'POST' headers[':path'] = '/bill/query.json' headers[':scheme'] = 'https' response = conn.request(method='POST', url='https://opn.jd.com/bill/query.json', body=result, headers=headers) resp = conn.get_response(response) print(resp.status) res = resp.read() print(res) print(json.loads(res)) # async def test(): # async with httpx.AsyncClient(http2=True) as client: # r = await client.post('https://opn.jd.com/bill/query.json', data=data, headers=headers) # 
print(r.text) # # # asyncio.run(test())
/test/test_re.py
import re string = "originOrgId: 'WDCN02431'," print(re.findall(re.compile(r"originOrgId: ['](.*?)[']", re.S), string)[0])
/test_text.py
#!/usr/bin/env python import time import wx import wx.adv #---------------------------------------------------------------------- class TestPanel(wx.Panel): def __init__(self, parent, log): self.log = log wx.Panel.__init__(self, parent, -1) textSizer = wx.BoxSizer(wx.VERTICAL) # self.stEmployees = wx.StaticText(self, -1, "你好,这个是测试文本", style=wx.ALIGN_CENTER) # self.stEmployees.SetForegroundColour("gray") # self.stEmployees.SetFont(font) # textSizer.Add(self.stEmployees, flag=wx.CENTER) title = wx.StaticText(self, -1, "This is an example of static text", style=wx.ALIGN_CENTER) center = wx.StaticText(self, -1, "align center", style=wx.ALIGN_CENTER) center.SetForegroundColour('white') center.SetBackgroundColour('black') textSizer.Add(title, 0, wx.EXPAND, 10) textSizer.Add(center, 0, wx.EXPAND, 10) self.SetSizer(textSizer) textSizer.Fit(self) import datetime def subtime(date1, date2): date1 = datetime.datetime.strptime(date1, "%Y-%m-%d %H:%M:%S") date2 = datetime.datetime.strptime(date2, "%Y-%m-%d %H:%M:%S") return date2 - date1 date1 = r'2015-06-19 02:38:01' date2 = r'2015-06-18 05:31:22' # print(time.gmtime()) print(subtime(date1, date2)) # date1 > date2 print(subtime(date2, date1)) # date1 < date2 nowdate = datetime.datetime.now() # 获取当前时间 nowdate = nowdate.strftime("%Y-%m-%d %H:%M:%S") # 当前时间转换为指定字符串格式 print(subtime(date2, nowdate)) # nowdate > date2 # In some cases the widget used above will be a native date # picker, so show the generic one too. 
# dpc = wx.adv.DatePickerCtrlGeneric(self, size=(120,-1), # style = wx.TAB_TRAVERSAL # | wx.adv.DP_DROPDOWN # | wx.adv.DP_SHOWCENTURY # | wx.adv.DP_ALLOWNONE ) # self.Bind(wx.adv.EVT_DATE_CHANGED, self.OnDateChanged, dpc) # sizer.Add(dpc, 0, wx.LEFT, 50) def OnDateChanged(self, evt): self.log.write("OnDateChanged: %s\n" % evt.GetDate()) #---------------------------------------------------------------------- def runTest(frame, nb, log): win = TestPanel(nb, log) return win #---------------------------------------------------------------------- overview = """<html><body> <h2><center>wx.DatePickerCtrl</center></h2> This control allows the user to select a date. Unlike wx.calendar.CalendarCtrl, which is a relatively big control, wx.DatePickerCtrl is implemented as a small window showing the currently selected date. The control can be edited using the keyboard, and can also display a popup window for more user-friendly date selection, depending on the styles used and the platform. </body></html> """ if __name__ == '__main__': import sys,os import run run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
joshharper64/frost
refs/heads/master
{"/resident_reports/admin.py": ["/resident_reports/models.py"], "/resident_reports/views.py": ["/resident_reports/models.py"]}
└── ├── homepage │ └── views.py └── resident_reports ├── admin.py ├── apps.py ├── migrations │ └── 0003_auto_20170517_0033.py ├── models.py ├── urls.py └── views.py
/homepage/views.py
from django.shortcuts import render def index(request): """Homepage""" return render(request, 'homepage/index.html') def about(request): """About Section""" return render(request, 'homepage/about.html')
/resident_reports/admin.py
from django.contrib import admin from resident_reports.models import Report admin.site.register(Report)
/resident_reports/apps.py
from django.apps import AppConfig class ResidentReportsConfig(AppConfig): name = 'resident_reports'
/resident_reports/migrations/0003_auto_20170517_0033.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-05-17 00:33 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('resident_reports', '0002_report'), ] operations = [ migrations.RemoveField( model_name='report', name='topic', ), migrations.DeleteModel( name='Topic', ), ]
/resident_reports/models.py
from django.db import models from django.contrib.auth.models import User class Report(models.Model): """ Report by User """ text = models.TextField() date_added = models.DateTimeField(auto_now_add=True) user_name = models.ForeignKey(User) class Meta: verbose_name_plural = 'reports' def __str__(self): return self.text[:50] + "..."
/resident_reports/urls.py
from django.conf.urls import url from . import views urlpatterns = [ url(r'^allreports/$', views.allreports, name='allreports'), url(r'^new_report/$', views.new_report, name='new_report'), url(r'^edit_report/(?P<report_id>\d+)/$', views.edit_report, name='edit_report'), ]
/resident_reports/views.py
from django.shortcuts import render from django.http import HttpResponseRedirect from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from django.contrib.auth import get_user_model from .models import Report from .forms import ReportForm def allreports(request): """ Show list of all reports, regardless of topic """ reports = Report.objects.order_by('-date_added') context = {'reports': reports} return render(request, 'resident_reports/allreports.html', context) @login_required def new_report(request): """ Add new report """ if request.method != 'POST': form = ReportForm() else: form = ReportForm(data=request.POST) if form.is_valid(): new_entry = form.save(commit=False) new_entry.user_name = request.user form.save() return HttpResponseRedirect(reverse('resident_reports:allreports')) context = {'form': form} return render(request, 'resident_reports/new_report.html', context) @login_required def edit_report(request, entry_id): """ Edit an existing report """ report = Report.objects.get(id=entry_id) if report.owner != request.owner: return HttpResponseRedirect(reverse('resident_reports:allreports')) if request.method != 'POST': form = ReportForm(instance=report) else: form = ReportForm(instance=entry, data=request.POST) if form.is_valid: form.save() return HttpResponseRedirect(reverse('resident_reports:allreports')) context = {'report': report, 'form': form} return render(request, 'resident_reports/edit_report.html', context)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
nforsch/SSCP19-mechanics-project7
refs/heads/master
{"/compute_displacement_subset.py": ["/demo.py"], "/compute_surface_nodes.py": ["/demo.py"]}
└── ├── compute_displacement_subset.py ├── compute_surface_nodes.py ├── create_ellipsoid.py ├── demo.py ├── lhs.py └── pca.py
/compute_displacement_subset.py
import os
import numpy as np
import dolfin as df
import pulse
import ldrb
import matplotlib.pyplot as plt
from scipy import spatial
from demo import load_geometry

pi = np.pi


def cart2prolate( focalLength, XYZ ):
    """Convert Cartesian coordinates to prolate-spheroidal coordinates.

    XYZ is an (n, 3) array of points (transposed internally); returns a
    (3, n) array TML where TML[0] = theta, TML[1] = mu, TML[2] = lambda.
    """
    # Convert Cartesian XYZ to Prolate TML
    # TML[0] = theta, TML[1] = mu, TML[2] = lambda
    X = XYZ.T[0]
    Y = XYZ.T[1]
    Z = XYZ.T[2]
    # r1/r2: distances to the two foci at (-+focalLength, 0, 0).
    r1 = np.sqrt( Y**2 + Z**2 + (X+focalLength)**2 )
    r2 = np.sqrt( Y**2 + Z**2 + (X-focalLength)**2 )
    # np.real guards against tiny imaginary parts from rounding at the poles.
    lmbda = np.real( np.arccosh((r1+r2)/(2*focalLength)) )
    mu = np.real( np.arccos((r1-r2)/(2*focalLength)) )
    theta = np.arctan2(Z,Y)
    # Wrap theta into [0, 2*pi).
    idx = theta<0
    theta[idx] = theta[idx] + 2*np.pi
    TML = np.concatenate(([theta], [mu], [lmbda]))
    return TML


def prolate2cart( focalLength, TML ):
    """Inverse of cart2prolate: prolate (3, n) TML back to Cartesian (3, n)."""
    # Convert Prolate TML to Cartesian XYZ
    # XYZ[0] = X, XYZ[1] = Y, XYZ[2] = Z
    theta = TML[0]
    mu = TML[1]
    lmbda = TML[2]
    X = focalLength * np.cosh(lmbda) * np.cos(mu)
    Y = focalLength * np.sinh(lmbda) * np.sin(mu) * np.cos(theta)
    Z = focalLength * np.sinh(lmbda) * np.sin(mu) * np.sin(theta)
    XYZ = np.concatenate(([X],[Y],[Z]))
    return XYZ


def focal( a, b, c ):
    """Focal length of an ellipsoid with semi-axes a (long) and b, c.

    b and c are averaged, i.e. the ellipsoid is treated as a spheroid.
    """
    focalLength = np.sqrt( a**2 - (0.5*(b+c))**2 )
    return focalLength


def get_surface_points(marker):
    """Return (coordinates, vertex indices) of the surface facets tagged
    *marker* in the module-level `geometry` (loaded below).

    NOTE(review): relies on the global `geometry`; must be called after
    load_geometry() has run.
    """
    coordinates = []
    idxs = []
    # Loop over the facets
    for facet in df.facets(geometry.mesh):
        # If the facet markers matched that of ENDO
        if geometry.ffun[facet] == marker:
            # Loop over the vertices of that facets
            for vertex in df.vertices(facet):
                idxs.append(vertex.global_index())
                # coordinates.append(tuple(vertex.midpoint().array()))
    # Remove duplicates
    idxs = np.array(list(set(idxs)))
    coordinates = geometry.mesh.coordinates()[idxs]
    return coordinates, idxs


def fit_prolate( P ):
    """Pick mesh nodes nearest to a fixed (theta, mu) sampling grid.

    P is a (3, n) prolate array (theta, mu, lambda); returns the indices
    (into P's columns) of the nodes closest to each of the 4x5 sampled
    (theta, mu) combinations, so different meshes yield comparable points.
    """
    # Sample nodes of mesh using prolate coordinates to get displacements for
    # same number of points, similar regions across meshes
    # input P = TML from mesh endo/epi
    mu_max = np.amax(P[1]) # find max mu coordinate from mesh
    tree = spatial.KDTree(P[0:2].T) # setup tree for finding nearest point
    idx_match = []
    sample_points = []
    for theta in np.linspace(pi/2,2*pi,4): # theta range
        for mu in np.linspace(0,mu_max,5): # mu ranges from 0 to mu_max based on mesh
            sample_points.append([theta,mu]) # list of sampled [theta,mu] combinations
            distance, index = tree.query([theta,mu]) # find closest point
            idx_match.append(index) # store index of point in endo or epi
    return idx_match


# --- Script: sample matching endo/epi nodes and save their ED->ES displacement.

# Define coordinates of ED mesh for endo and epi
geometry = load_geometry('ellipsoid.h5')

# Get nodes ENDO
marker_endo = geometry.markers['ENDO'][0]
endo_coordinates, endo_idxs = get_surface_points(marker_endo)

# Get nodes EPI
marker_epi = geometry.markers['EPI'][0]
epi_coordinates, epi_idxs = get_surface_points(marker_epi)

# convert Cartesian coordinates to Prolate, find maximum mu value
# NOTE(review): these [a, b, c] values must match the mesh-generation
# parameters used to build ellipsoid.h5 — confirm against create_ellipsoid.
focalLength_endo = focal(4.1,1.6,1.6) # same parameters [a,b,c] used for mesh
focalLength_epi = focal(5,2.9,2.9) # same parameters [a,b,c] used for mesh
TML_endo = cart2prolate(focalLength_endo, endo_coordinates)
TML_epi = cart2prolate(focalLength_epi, epi_coordinates)
# XYZ_endo = prolate2cart(focalLength_endo,TML_endo) # check return XYZ from TML

# Find fit to closest node by varying theta, mu and fitting lambda (store index of node)
idx_match_endo = fit_prolate(TML_endo)
idx_match_epi = fit_prolate(TML_epi)
idx_node_endo = endo_idxs[idx_match_endo].tolist()
idx_node_epi = epi_idxs[idx_match_epi].tolist()
idx_nodes = idx_node_endo + idx_node_epi

# Get displacement between ES and ED using idx_nodes
# NOTE(review): the node ordering of coords_ED.txt / coords_ES.txt is
# assumed to match geometry.mesh.coordinates() — verify upstream.
print('Loading ED and ES mesh coordinates...')
ed_coordinates = np.loadtxt('coords_ED.txt',delimiter=',')
es_coordinates = np.loadtxt('coords_ES.txt',delimiter=',')
displacement = es_coordinates-ed_coordinates # calculate displacement between ED and ES
disp_out = displacement[idx_nodes] # get displacement for nodes in list idx_nodes
print('Saving displacements for %d points' %(len(idx_nodes)))
np.savetxt('displacement.txt',disp_out,fmt='%.8f',delimiter=',')
# from IPython import embed; embed()
/compute_surface_nodes.py
import dolfin as df from demo import load_geometry geometry = load_geometry() endo_coordinates = [] endo_marker = geometry.markers['ENDO'][0] # Loop over the facets for facet in df.facets(geometry.mesh): # If the facet markers matched that of ENDO if geometry.ffun[facet] == endo_marker: # Loop over the vertices of that facets for vertex in df.vertices(facet): endo_coordinates.append(tuple(vertex.midpoint().array())) # Remove duplicates endo_coordinates = set(endo_coordinates)
/create_ellipsoid.py
import os import numpy as np import dolfin as df import pulse import ldrb def create_geometry(h5name): """ Create an lv-ellipsoidal mesh and fiber fields using LDRB algorithm An ellipsoid is given by the equation .. math:: \frac{x^2}{a} + \frac{y^2}{b} + \frac{z^2}{c} = 1 We create two ellipsoids, one for the endocardium and one for the epicardium and subtract them and then cut the base. For simplicity we assume that the longitudinal axis is in in :math:`x`-direction and as default the base is located at the :math:`x=0` plane. """ # Number of subdivision (higher -> finer mesh) N = 13 # Parameter for the endo ellipsoid a_endo = 1.5 b_endo = 0.5 c_endo = 0.5 # Parameter for the epi ellipsoid a_epi = 2.0 b_epi = 1.0 c_epi = 1.0 # Center of the ellipsoid (same of endo and epi) center = (0.0, 0.0, 0.0) # Location of the base base_x = 0.0 # Create a lv ellipsoid mesh with longitudinal axis along the x-axis geometry = ldrb.create_lv_mesh( N=N, a_endo=a_endo, b_endo=b_endo, c_endo=c_endo, a_epi=a_epi, b_epi=b_epi, c_epi=c_epi, center=center, base_x=base_x ) # Select fiber angles for rule based algorithm angles = dict(alpha_endo_lv=60, # Fiber angle on the endocardium alpha_epi_lv=-60, # Fiber angle on the epicardium beta_endo_lv=0, # Sheet angle on the endocardium beta_epi_lv=0) # Sheet angle on the epicardium fiber_space = 'Lagrange_1' # Compte the microstructure fiber, sheet, sheet_normal = ldrb.dolfin_ldrb(mesh=geometry.mesh, fiber_space=fiber_space, ffun=geometry.ffun, markers=geometry.markers, **angles) # Compute focal point focal = np.sqrt(a_endo**2 - (0.5 * (b_endo + c_endo))**2) # Make mesh according to AHA-zons pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh, foc=focal) mapper = {'lv': 'ENDO', 'epi': 'EPI', 'rv': 'ENDO_RV', 'base': 'BASE'} m = {mapper[k]: (v, 2) for k, v in geometry.markers.items()} pulse.geometry_utils.save_geometry_to_h5( geometry.mesh, h5name, markers=m, fields=[fiber, sheet, sheet_normal] ) create_geometry('ellipsoid.h5')
/demo.py
import os
import numpy as np
import dolfin as df
import pulse
import ldrb
import matplotlib.pyplot as plt


def create_geometry(h5name):
    """Create an LV-ellipsoidal mesh with LDRB fiber fields and save to *h5name*.

    An ellipsoid is given by the equation

    .. math::

        \\frac{x^2}{a} + \\frac{y^2}{b} + \\frac{z^2}{c} = 1

    Two ellipsoids are built (endocardium and epicardium), subtracted,
    and the base is cut.  The longitudinal axis is the :math:`x`-axis
    and the base sits at the :math:`x = 0` plane.
    """
    # Number of subdivisions (higher -> finer mesh)
    N = 13

    # Semi-axes of the endocardial ellipsoid
    a_endo = 1.5
    b_endo = 0.5
    c_endo = 0.5

    # Semi-axes of the epicardial ellipsoid
    a_epi = 2.0
    b_epi = 1.0
    c_epi = 1.0

    # Shared center and base location
    center = (0.0, 0.0, 0.0)
    base_x = 0.0

    # Build the LV ellipsoid mesh, long axis along x
    geometry = ldrb.create_lv_mesh(
        N=N,
        a_endo=a_endo, b_endo=b_endo, c_endo=c_endo,
        a_epi=a_epi, b_epi=b_epi, c_epi=c_epi,
        center=center,
        base_x=base_x,
    )

    # Fiber/sheet angles for the rule-based (LDRB) algorithm
    angles = dict(alpha_endo_lv=60,   # fiber angle on the endocardium
                  alpha_epi_lv=-60,   # fiber angle on the epicardium
                  beta_endo_lv=0,     # sheet angle on the endocardium
                  beta_epi_lv=0)      # sheet angle on the epicardium

    fiber_space = 'Lagrange_1'

    # Compute the microstructure fields
    fiber, sheet, sheet_normal = ldrb.dolfin_ldrb(mesh=geometry.mesh,
                                                  fiber_space=fiber_space,
                                                  ffun=geometry.ffun,
                                                  markers=geometry.markers,
                                                  **angles)

    # Focal point of the prolate system used to mark the AHA zones
    focal = np.sqrt(a_endo**2 - (0.5 * (b_endo + c_endo))**2)

    # Mark the mesh according to AHA zones
    pulse.geometry_utils.mark_strain_regions(mesh=geometry.mesh, foc=focal,
                                             nsectors=(15, 15, 15, 5))

    # Translate ldrb marker names into the names pulse expects
    mapper = {'lv': 'ENDO', 'epi': 'EPI', 'rv': 'ENDO_RV', 'base': 'BASE'}
    m = {mapper[k]: (v, 2) for k, v in geometry.markers.items()}
    pulse.geometry_utils.save_geometry_to_h5(
        geometry.mesh, h5name, markers=m,
        fields=[fiber, sheet, sheet_normal], overwrite_file=True
    )


def load_geometry(h5name='ellipsoid.h5', recreate=False):
    """Load the geometry from *h5name*, creating it first if needed."""
    if not os.path.exists(h5name) or recreate:
        create_geometry(h5name)

    geo = pulse.HeartGeometry.from_file(h5name)
    # Scale mesh to a realistic size
    geo.mesh.coordinates()[:] *= 4.5
    return geo


def save_geometry_vis(geometry, folder='geometry'):
    """Save the geometry, markers and fibers to files viewable in paraview."""
    if not os.path.isdir(folder):
        os.makedirs(folder)

    for attr in ['mesh', 'ffun', 'cfun']:
        print('Save {}'.format(attr))
        df.File('{}/{}.pvd'.format(folder, attr)) << getattr(geometry, attr)

    for attr in ['f0', 's0', 'n0']:
        ldrb.fiber_to_xdmf(getattr(geometry, attr),
                           '{}/{}'.format(folder, attr))


def get_strains(u, v, dx):
    """Average Green-Lagrange strain of *u* along direction *v* over *dx*."""
    F = pulse.kinematics.DeformationGradient(u)
    E = pulse.kinematics.GreenLagrangeStrain(F, isochoric=False)
    return df.assemble(df.inner(E * v, v) * dx) \
        / df.assemble(df.Constant(1.0) * dx)


def get_nodal_coordinates(u):
    """Return mesh node coordinates after moving the mesh by displacement *u*."""
    mesh = df.Mesh(u.function_space().mesh())
    V = df.VectorFunctionSpace(mesh, "CG", 1)
    df.ALE.move(mesh, df.interpolate(u, V))
    return mesh.coordinates()


def postprocess(geometry):
    """Compute regional fiber strains at ED and ES, export node coordinates
    and plot the strains per AHA region.

    Arguments
    ---------
    geometry : pulse.HeartGeometry
        Geometry whose ED/ES displacement XML files are read from disk.
    """
    coords = [geometry.mesh.coordinates()]
    V = df.VectorFunctionSpace(geometry.mesh, "CG", 2)
    # Row 0 is the (zero) reference state; rows 1 and 2 hold ED and ES.
    Ef = np.zeros((3, 17))

    u_ED = df.Function(V, "ED_displacement.xml")
    coords.append(get_nodal_coordinates(u_ED))
    for i in range(17):
        Ef[1, i] = get_strains(u_ED, geometry.f0, geometry.dx(i + 1))
    EDV = geometry.cavity_volume(u=u_ED)

    u_ES = df.Function(V, "ES_displacement.xml")
    coords.append(get_nodal_coordinates(u_ES))
    for i in range(17):
        Ef[2, i] = get_strains(u_ES, geometry.f0, geometry.dx(i + 1))
    ESV = geometry.cavity_volume(u=u_ES)

    # Stroke volume and ejection fraction
    SV = EDV - ESV
    EF = SV / EDV
    print(("EDV: {EDV:.2f} ml\nESV: {ESV:.2f} ml\nSV: {SV:.2f}"
           " ml\nEF: {EF:.2f}").format(EDV=EDV, ESV=ESV, SV=SV, EF=EF))

    # Save nodes as txt at ED and ES
    np.savetxt('coords_ED.txt', coords[1], fmt='%.4f', delimiter=',')
    np.savetxt('coords_ES.txt', coords[2], fmt='%.4f', delimiter=',')

    fig, ax = plt.subplots(1, 3, sharex=True, sharey=True)
    for i in range(17):
        # 6 basal, 6 mid, 5 apical regions -> one subplot per level
        j = i // 6
        ax[j].plot(Ef[:, i], label="region {}".format(i + 1))

    ax[0].set_title("Basal")
    ax[1].set_title("Mid")
    ax[2].set_title("Apical")
    ax[0].set_ylabel("Fiber strain")
    for axi in ax:
        axi.set_xticks(range(3))
        axi.set_xticklabels(["", "ED", "ES"])
        axi.legend()
    plt.show()


def solve(
    geometry,
    EDP=1.0,
    ESP=15.0,
    Ta=60,
    material_parameters=None,
):
    """Inflate and activate the LV, saving ED and ES displacements.

    Arguments
    ---------
    EDP : float
        End diastolic pressure
    ESP : float
        End systolic pressure
    Ta : float
        Peak active tension (at ES)
    material_parameters : dict
        A dictionary with parameters of the Guccione model.
        Default: {'C': 2.0, 'bf': 8.0, 'bt': 2.0, 'bfs': 4.0}
    """
    # Create model
    activation = df.Function(df.FunctionSpace(geometry.mesh, "R", 0))
    matparams = pulse.Guccione.default_parameters()
    if material_parameters is not None:
        matparams.update(material_parameters)
    material = pulse.Guccione(activation=activation,
                              parameters=matparams,
                              active_model="active_stress",
                              f0=geometry.f0,
                              s0=geometry.s0,
                              n0=geometry.n0)

    # LV cavity pressure as a Neumann boundary condition on the endocardium
    lvp = df.Constant(0.0)
    lv_marker = geometry.markers['ENDO'][0]
    lv_pressure = pulse.NeumannBC(traction=lvp,
                                  marker=lv_marker, name='lv')
    neumann_bc = [lv_pressure]

    # Add spring term at the base with stiffness 1.0 kPa/cm^2
    base_spring = 1.0
    robin_bc = [pulse.RobinBC(value=df.Constant(base_spring),
                              marker=geometry.markers["BASE"][0])]

    # Fix the basal plane in the longitudinal direction.
    # 0 in V.sub(0) refers to x-direction, which is the longitudinal direction
    def fix_basal_plane(W):
        V = W if W.sub(0).num_sub_spaces() == 0 else W.sub(0)
        bc = df.DirichletBC(V.sub(0),
                            df.Constant(0.0),
                            geometry.ffun, geometry.markers["BASE"][0])
        return bc

    dirichlet_bc = [fix_basal_plane]

    # Collect boundary conditions
    bcs = pulse.BoundaryConditions(dirichlet=dirichlet_bc,
                                   neumann=neumann_bc,
                                   robin=robin_bc)

    # Create the problem
    problem = pulse.MechanicsProblem(geometry, material, bcs)

    xdmf = df.XDMFFile(df.mpi_comm_world(), 'output.xdmf')

    # Unloaded reference solve
    print(("Do an initial solve with pressure = 0 kPa "
           "and active tension = 0 kPa"))
    problem.solve()
    u, p = problem.state.split()
    xdmf.write(u, 0.0)
    print("LV cavity volume = {} ml".format(geometry.cavity_volume(u=u)))

    # Solve for ED
    print(("Solver for ED with pressure = {} kPa and active tension = 0 kPa"
           "".format(EDP)))
    pulse.iterate.iterate(problem, lvp, EDP, initial_number_of_steps=20)
    u, p = problem.state.split(deepcopy=True)
    xdmf.write(u, 1.0)
    df.File("ED_displacement.xml") << u
    print("LV cavity volume = {} ml".format(geometry.cavity_volume(u=u)))

    # Solve for ES: first raise the pressure, then activate
    print(("Solver for ES with pressure = {} kPa and active tension = {} kPa"
           "".format(ESP, Ta)))
    pulse.iterate.iterate(problem, lvp, ESP, initial_number_of_steps=50)
    pulse.iterate.iterate(problem, activation, Ta,
                          adapt_step=False, max_iters=100,
                          initial_number_of_steps=40)
    u, p = problem.state.split(deepcopy=True)
    xdmf.write(u, 2.0)
    df.File("ES_displacement.xml") << u
    print("LV cavity volume = {} ml".format(geometry.cavity_volume(u=u)))


def main():
    geometry = load_geometry(h5name='ellipsoid.h5', recreate=True)
    save_geometry_vis(geometry, folder='geometry')

    import time
    t0 = time.time()
    solve(geometry, EDP=1.0, ESP=15.0, Ta=60, material_parameters=None)
    t1 = time.time()
    print('Elapsed time = {:.2f} seconds'.format(t1 - t0))

    postprocess(geometry)


if __name__ == "__main__":
    main()
/lhs.py
from pyDOE import * from scipy.stats.distributions import norm # Latin Hypercube Sampling # see: https://pythonhosted.org/pyDOE/randomized.html # Run LHS for n factors X = lhs(4, samples=100) # lhs(n, [samples, criterion, iterations]) # Transform factors to normal distributions with means and standard deviations means = [1, 2, 3, 4] stdvs = [0.1, 0.5, 1, 0.25] for i in range(4): X[:, i] = norm(loc=means[i], scale=stdvs[i]).ppf(X[:, i])
/pca.py
# PCA demo # Uses PCA from sklearn.decomposition: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib.pyplot as plt from sklearn.decomposition import PCA import numpy as np import seaborn as sns; sns.set() # Data X_train = [] X_sample = [] # PCA pca = PCA(n_components=2) pca.fit(X_train) # pca.explained_variance_ # pca.explained_variance_ratio_ # pca.components_ # pca.mean_ # pca.singular_values_ # Transform sample data sample_weights = pca.transform(X_sample) # Recreate from component weights X_recreate = pca.mean_ + sample_weights.dot(pca.components_) # OR # X_recreate = pca.inverse_transform(sample_weights) # Plot explained variance per PC and cumulative var_ratio = pca.explained_variance_ratio_ cumsum_var = np.cumsum(var_ratio) plt.figure(figsize=(8, 6)) plt.bar(range(1,21), var_ratio.values.flatten(), color='r',alpha=0.5, align='center', label='individual explained variance') plt.step(range(1,21), cumsum_var.values.flatten(), where='mid', label='cumulative explained variance')
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
aaronn/django-rest-framework-passwordless
refs/heads/master
{"/drfpasswordless/admin.py": ["/drfpasswordless/models.py"], "/drfpasswordless/serializers.py": ["/drfpasswordless/models.py", "/drfpasswordless/utils.py"], "/drfpasswordless/signals.py": ["/drfpasswordless/models.py", "/drfpasswordless/services.py"], "/drfpasswordless/utils.py": ["/drfpasswordless/models.py"], "/drfpasswordless/views.py": ["/drfpasswordless/models.py", "/drfpasswordless/services.py", "/drfpasswordless/serializers.py"], "/tests/test_authentication.py": ["/drfpasswordless/models.py"], "/tests/test_verification.py": ["/drfpasswordless/models.py"], "/drfpasswordless/urls.py": ["/drfpasswordless/views.py"], "/tests/urls.py": ["/drfpasswordless/views.py"], "/drfpasswordless/services.py": ["/drfpasswordless/utils.py"]}
└── ├── drfpasswordless │ ├── __init__.py │ ├── admin.py │ ├── apps.py │ ├── migrations │ │ ├── 0003_callbacktoken_type.py │ │ └── 0004_auto_20200125_0853.py │ ├── models.py │ ├── serializers.py │ ├── services.py │ ├── settings.py │ ├── signals.py │ ├── urls.py │ ├── utils.py │ └── views.py └── tests ├── models.py ├── test_authentication.py ├── test_verification.py └── urls.py
/drfpasswordless/__init__.py
# -*- coding: utf-8 -*- __title__ = 'drfpasswordless' __version__ = '1.5.8' __author__ = 'Aaron Ng' __license__ = 'MIT' __copyright__ = 'Copyright 2022 Aaron Ng' # Version synonym VERSION = __version__ default_app_config = 'drfpasswordless.apps.DrfpasswordlessConfig'
/drfpasswordless/admin.py
from django.contrib import admin from django.urls import reverse from drfpasswordless.models import CallbackToken class UserLinkMixin(object): """ A mixin to add a linkable list_display user field. """ LINK_TO_USER_FIELD = 'link_to_user' def link_to_user(self, obj): link = reverse('admin:users_user_change', args=[obj.user.id]) return u'<a href={}>{}</a>'.format(link, obj.user.username) link_to_user.allow_tags = True link_to_user.short_description = 'User' class AbstractCallbackTokenInline(admin.StackedInline): max_num = 0 extra = 0 readonly_fields = ('created_at', 'key', 'type', 'is_active') fields = ('created_at', 'user', 'key', 'type', 'is_active') class CallbackInline(AbstractCallbackTokenInline): model = CallbackToken class AbstractCallbackTokenAdmin(UserLinkMixin, admin.ModelAdmin): readonly_fields = ('created_at', 'user', 'key', 'type', 'to_alias_type') list_display = ('created_at', UserLinkMixin.LINK_TO_USER_FIELD, 'key', 'type', 'is_active', 'to_alias_type') fields = ('created_at', 'user', 'key', 'type', 'is_active', 'to_alias_type') extra = 0
/drfpasswordless/apps.py
from django.apps import AppConfig from django.utils.translation import gettext_lazy as _ class DrfpasswordlessConfig(AppConfig): name = 'drfpasswordless' verbose = _("DRF Passwordless") def ready(self): import drfpasswordless.signals
/drfpasswordless/migrations/0003_callbacktoken_type.py
# Generated by Django 3.0.2 on 2020-01-22 08:34 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('drfpasswordless', '0002_auto_20200122_0424'), ] operations = [ migrations.AddField( model_name='callbacktoken', name='type', field=models.CharField(choices=[('AUTH', 'Auth'), ('VERIFY', 'Verify')], default='VERIFY', max_length=20), preserve_default=False, ), ]
/drfpasswordless/migrations/0004_auto_20200125_0853.py
# Generated by Django 3.0.2 on 2020-01-25 08:53 from django.db import migrations, models import drfpasswordless.models class Migration(migrations.Migration): dependencies = [ ('drfpasswordless', '0003_callbacktoken_type'), ] operations = [ migrations.AlterField( model_name='callbacktoken', name='key', field=models.CharField(default=drfpasswordless.models.generate_numeric_token, max_length=6), ), migrations.AlterUniqueTogether( name='callbacktoken', unique_together={('is_active', 'key', 'type')}, ), ]
/drfpasswordless/models.py
import uuid from django.db import models from django.conf import settings import string from django.utils.crypto import get_random_string def generate_hex_token(): return uuid.uuid1().hex def generate_numeric_token(): """ Generate a random 6 digit string of numbers. We use this formatting to allow leading 0s. """ return get_random_string(length=6, allowed_chars=string.digits) class CallbackTokenManger(models.Manager): def active(self): return self.get_queryset().filter(is_active=True) def inactive(self): return self.get_queryset().filter(is_active=False) class AbstractBaseCallbackToken(models.Model): """ Callback Authentication Tokens These tokens present a client with their authorization token on successful exchange of a random token (email) or token (for mobile) When a new token is created, older ones of the same type are invalidated via the pre_save signal in signals.py. """ id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False, unique=True) created_at = models.DateTimeField(auto_now_add=True) user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=None, on_delete=models.CASCADE) is_active = models.BooleanField(default=True) to_alias = models.CharField(blank=True, max_length=254) to_alias_type = models.CharField(blank=True, max_length=20) objects = CallbackTokenManger() class Meta: abstract = True get_latest_by = 'created_at' ordering = ['-id'] def __str__(self): return str(self.key) class CallbackToken(AbstractBaseCallbackToken): """ Generates a random six digit number to be returned. """ TOKEN_TYPE_AUTH = 'AUTH' TOKEN_TYPE_VERIFY = 'VERIFY' TOKEN_TYPES = ((TOKEN_TYPE_AUTH, 'Auth'), (TOKEN_TYPE_VERIFY, 'Verify')) key = models.CharField(default=generate_numeric_token, max_length=6) type = models.CharField(max_length=20, choices=TOKEN_TYPES) class Meta(AbstractBaseCallbackToken.Meta): verbose_name = 'Callback Token'
/drfpasswordless/serializers.py
import logging

from django.utils.translation import gettext_lazy as _
from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.core.validators import RegexValidator
from rest_framework import serializers
from rest_framework.exceptions import ValidationError

from drfpasswordless.models import CallbackToken
from drfpasswordless.settings import api_settings
from drfpasswordless.utils import verify_user_alias, validate_token_age

logger = logging.getLogger(__name__)
User = get_user_model()


class TokenField(serializers.CharField):
    default_error_messages = {
        'required': _('Invalid Token'),
        'invalid': _('Invalid Token'),
        'blank': _('Invalid Token'),
        'max_length': _('Tokens are {max_length} digits long.'),
        'min_length': _('Tokens are {min_length} digits long.')
    }


class AbstractBaseAliasAuthenticationSerializer(serializers.Serializer):
    """
    Abstract class that returns a callback token based on the field given
    Returns a token if valid, None or a message if not.
    """

    @property
    def alias_type(self):
        # The alias type, either email or mobile
        raise NotImplementedError

    def validate(self, attrs):
        alias = attrs.get(self.alias_type)

        if alias:
            # Create or authenticate a user
            # Return THem

            if api_settings.PASSWORDLESS_REGISTER_NEW_USERS is True:
                # If new aliases should register new users.
                try:
                    user = User.objects.get(**{self.alias_type + '__iexact': alias})
                except User.DoesNotExist:
                    user = User.objects.create(**{self.alias_type: alias})
                    user.set_unusable_password()
                    user.save()
            else:
                # If new aliases should not register new users.
                try:
                    user = User.objects.get(**{self.alias_type + '__iexact': alias})
                except User.DoesNotExist:
                    user = None

            if user:
                if not user.is_active:
                    # If valid, return attrs so we can create a token in our logic controller
                    msg = _('User account is disabled.')
                    raise serializers.ValidationError(msg)
            else:
                msg = _('No account is associated with this alias.')
                raise serializers.ValidationError(msg)
        else:
            msg = _('Missing %s.') % self.alias_type
            raise serializers.ValidationError(msg)

        attrs['user'] = user
        return attrs


class EmailAuthSerializer(AbstractBaseAliasAuthenticationSerializer):

    @property
    def alias_type(self):
        return 'email'

    email = serializers.EmailField()


class MobileAuthSerializer(AbstractBaseAliasAuthenticationSerializer):

    @property
    def alias_type(self):
        return 'mobile'

    phone_regex = RegexValidator(regex=r'^\+[1-9]\d{1,14}$',
                                 message="Mobile number must be entered in the format:"
                                         " '+999999999'. Up to 15 digits allowed.")
    mobile = serializers.CharField(validators=[phone_regex], max_length=17)


"""
Verification
"""


class AbstractBaseAliasVerificationSerializer(serializers.Serializer):
    """
    Abstract class that returns a callback token based on the field given
    Returns a token if valid, None or a message if not.
    """

    @property
    def alias_type(self):
        # The alias type, either email or mobile
        raise NotImplementedError

    def validate(self, attrs):

        msg = _('There was a problem with your request.')

        if self.alias_type:
            # Get request.user
            # Get their specified valid endpoint
            # Validate
            request = self.context["request"]
            if request and hasattr(request, "user"):
                user = request.user
                if user:
                    if not user.is_active:
                        # If valid, return attrs so we can create a token in our logic controller
                        msg = _('User account is disabled.')
                    else:
                        if hasattr(user, self.alias_type):
                            # Has the appropriate alias type
                            attrs['user'] = user
                            return attrs
                        else:
                            msg = _('This user doesn\'t have an %s.' % self.alias_type)
            raise serializers.ValidationError(msg)
        else:
            msg = _('Missing %s.') % self.alias_type
            raise serializers.ValidationError(msg)


class EmailVerificationSerializer(AbstractBaseAliasVerificationSerializer):

    @property
    def alias_type(self):
        return 'email'


class MobileVerificationSerializer(AbstractBaseAliasVerificationSerializer):

    @property
    def alias_type(self):
        return 'mobile'


"""
Callback Token
"""


def token_age_validator(value):
    """
    Check token age
    Makes sure a token is within the proper expiration datetime window.
    """
    valid_token = validate_token_age(value)
    if not valid_token:
        raise serializers.ValidationError("The token you entered isn't valid.")
    return value


class AbstractBaseCallbackTokenSerializer(serializers.Serializer):
    """
    Abstract class inspired by DRF's own token serializer.
    Returns a user if valid, None or a message if not.
    """
    phone_regex = RegexValidator(regex=r'^\+[1-9]\d{1,14}$',
                                 message="Mobile number must be entered in the format:"
                                         " '+999999999'. Up to 15 digits allowed.")
    # Needs to be required=false to require both.
    email = serializers.EmailField(required=False)
    mobile = serializers.CharField(required=False,
                                   validators=[phone_regex], max_length=17)
    token = TokenField(min_length=6, max_length=6,
                       validators=[token_age_validator])

    def validate_alias(self, attrs):
        email = attrs.get('email', None)
        mobile = attrs.get('mobile', None)

        if email and mobile:
            raise serializers.ValidationError()

        if not email and not mobile:
            raise serializers.ValidationError()

        if email:
            return 'email', email
        elif mobile:
            return 'mobile', mobile

        return None


class CallbackTokenAuthSerializer(AbstractBaseCallbackTokenSerializer):

    def validate(self, attrs):
        # Check Aliases
        try:
            alias_type, alias = self.validate_alias(attrs)
            callback_token = attrs.get('token', None)
            user = User.objects.get(**{alias_type + '__iexact': alias})
            token = CallbackToken.objects.get(**{'user': user,
                                                 'key': callback_token,
                                                 'type': CallbackToken.TOKEN_TYPE_AUTH,
                                                 'is_active': True})

            if token.user == user:
                # Check the token type for our uni-auth method.
                # authenticates and checks the expiry of the callback token.
                if not user.is_active:
                    msg = _('User account is disabled.')
                    raise serializers.ValidationError(msg)

                if api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED \
                        or api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED:
                    # Mark this alias as verified
                    user = User.objects.get(pk=token.user.pk)
                    success = verify_user_alias(user, token)

                    if success is False:
                        msg = _('Error validating user alias.')
                        raise serializers.ValidationError(msg)

                attrs['user'] = user
                return attrs

            else:
                msg = _('Invalid Token')
                raise serializers.ValidationError(msg)
        except CallbackToken.DoesNotExist:
            msg = _('Invalid alias parameters provided.')
            raise serializers.ValidationError(msg)
        except User.DoesNotExist:
            msg = _('Invalid user alias parameters provided.')
            raise serializers.ValidationError(msg)
        except ValidationError:
            msg = _('Invalid alias parameters provided.')
            raise serializers.ValidationError(msg)


class CallbackTokenVerificationSerializer(AbstractBaseCallbackTokenSerializer):
    """
    Takes a user and a token, verifies the token belongs to the user and
    validates the alias that the token was sent from.
    """

    def validate(self, attrs):
        try:
            alias_type, alias = self.validate_alias(attrs)
            user_id = self.context.get("user_id")
            user = User.objects.get(**{'id': user_id,
                                       alias_type + '__iexact': alias})
            callback_token = attrs.get('token', None)

            token = CallbackToken.objects.get(**{'user': user,
                                                 'key': callback_token,
                                                 'type': CallbackToken.TOKEN_TYPE_VERIFY,
                                                 'is_active': True})

            if token.user == user:
                # Mark this alias as verified
                success = verify_user_alias(user, token)
                if success is False:
                    logger.debug("drfpasswordless: Error verifying alias.")

                attrs['user'] = user
                return attrs
            else:
                msg = _('This token is invalid. Try again later.')
                logger.debug("drfpasswordless: User token mismatch when verifying alias.")

        except CallbackToken.DoesNotExist:
            msg = _('We could not verify this alias.')
            logger.debug("drfpasswordless: Tried to validate alias with bad token.")
            pass
        except User.DoesNotExist:
            msg = _('We could not verify this alias.')
            logger.debug("drfpasswordless: Tried to validate alias with bad user.")
            pass
        except PermissionDenied:
            msg = _('Insufficient permissions.')
            logger.debug("drfpasswordless: Permission denied while validating alias.")
            pass

        raise serializers.ValidationError(msg)


"""
Responses
"""


class TokenResponseSerializer(serializers.Serializer):
    """
    Our default response serializer.
    """
    token = serializers.CharField(source='key')
    key = serializers.CharField(write_only=True)
/drfpasswordless/services.py
from django.utils.module_loading import import_string from drfpasswordless.settings import api_settings from drfpasswordless.utils import ( create_callback_token_for_user, ) class TokenService(object): @staticmethod def send_token(user, alias_type, token_type, **message_payload): token = create_callback_token_for_user(user, alias_type, token_type) send_action = None if user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys(): return True if alias_type == 'email': send_action = import_string(api_settings.PASSWORDLESS_EMAIL_CALLBACK) elif alias_type == 'mobile': send_action = import_string(api_settings.PASSWORDLESS_SMS_CALLBACK) # Send to alias success = send_action(user, token, **message_payload) return success
/drfpasswordless/settings.py
from django.conf import settings
from rest_framework.settings import APISettings

USER_SETTINGS = getattr(settings, 'PASSWORDLESS_AUTH', None)

DEFAULTS = {
    # Allowed auth types, can be EMAIL, MOBILE, or both.
    'PASSWORDLESS_AUTH_TYPES': ['EMAIL'],

    # URL Prefix for Authentication Endpoints
    'PASSWORDLESS_AUTH_PREFIX': 'auth/',

    # URL Prefix for Verification Endpoints
    'PASSWORDLESS_VERIFY_PREFIX': 'auth/verify/',

    # Amount of time that tokens last, in seconds
    'PASSWORDLESS_TOKEN_EXPIRE_TIME': 15 * 60,

    # The user's email field name
    'PASSWORDLESS_USER_EMAIL_FIELD_NAME': 'email',

    # The user's mobile field name
    'PASSWORDLESS_USER_MOBILE_FIELD_NAME': 'mobile',

    # Marks itself as verified the first time a user completes auth via token.
    # Automatically unmarks itself if email is changed.
    'PASSWORDLESS_USER_MARK_EMAIL_VERIFIED': False,
    'PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME': 'email_verified',

    # Marks itself as verified the first time a user completes auth via token.
    # Automatically unmarks itself if mobile number is changed.
    'PASSWORDLESS_USER_MARK_MOBILE_VERIFIED': False,
    'PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME': 'mobile_verified',

    # The email the callback token is sent from
    'PASSWORDLESS_EMAIL_NOREPLY_ADDRESS': None,

    # The email subject
    'PASSWORDLESS_EMAIL_SUBJECT': "Your Login Token",

    # A plaintext email message overridden by the html message. Takes one string.
    'PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE': "Enter this token to sign in: %s",

    # The email template name.
    'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME': "passwordless_default_token_email.html",

    # Your twilio number that sends the callback tokens.
    'PASSWORDLESS_MOBILE_NOREPLY_NUMBER': None,

    # The message sent to mobile users logging in. Takes one string.
    'PASSWORDLESS_MOBILE_MESSAGE': "Use this code to log in: %s",

    # Registers previously unseen aliases as new users.
    'PASSWORDLESS_REGISTER_NEW_USERS': True,

    # Suppresses actual SMS for testing
    'PASSWORDLESS_TEST_SUPPRESSION': False,

    # Context Processors for Email Template
    'PASSWORDLESS_CONTEXT_PROCESSORS': [],

    # The verification email subject
    'PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT': "Your Verification Token",

    # A plaintext verification email message overridden by the html message. Takes one string.
    'PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE': "Enter this verification code: %s",

    # The verification email template name.
    'PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME': "passwordless_default_verification_token_email.html",

    # The message sent to mobile users logging in. Takes one string.
    'PASSWORDLESS_MOBILE_VERIFICATION_MESSAGE': "Enter this verification code: %s",

    # Automatically send verification email or sms when a user changes their alias.
    'PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN': False,

    # What function is called to construct an authentication tokens when
    # exchanging a passwordless token for a real user auth token.
    'PASSWORDLESS_AUTH_TOKEN_CREATOR': 'drfpasswordless.utils.create_authentication_token',

    # What function is called to construct a serializer for drf tokens when
    # exchanging a passwordless token for a real user auth token.
    'PASSWORDLESS_AUTH_TOKEN_SERIALIZER': 'drfpasswordless.serializers.TokenResponseSerializer',

    # A dictionary of demo user's primary key mapped to their static pin
    'PASSWORDLESS_DEMO_USERS': {},

    'PASSWORDLESS_EMAIL_CALLBACK': 'drfpasswordless.utils.send_email_with_callback_token',
    'PASSWORDLESS_SMS_CALLBACK': 'drfpasswordless.utils.send_sms_with_callback_token',

    # Token Generation Retry Count
    'PASSWORDLESS_TOKEN_GENERATION_ATTEMPTS': 3
}

# List of settings that may be in string import notation.
# NOTE(review): 'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE' does not match any
# DEFAULTS key (the actual key ends in _TEMPLATE_NAME) — harmless as-is, but
# confirm intent before "fixing": import-stringing a template *name* would break.
IMPORT_STRINGS = (
    'PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE',
    'PASSWORDLESS_CONTEXT_PROCESSORS',
)

api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
/drfpasswordless/signals.py
import logging

from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.dispatch import receiver
from django.db.models import signals

from drfpasswordless.models import CallbackToken
from drfpasswordless.models import generate_numeric_token
from drfpasswordless.settings import api_settings
from drfpasswordless.services import TokenService

logger = logging.getLogger(__name__)


@receiver(signals.post_save, sender=CallbackToken)
def invalidate_previous_tokens(sender, instance, created, **kwargs):
    """
    Invalidates all previously issued tokens of that type when a new one is created, used, or anything like that.
    """
    # Demo users keep a single static token forever, so never invalidate theirs.
    if instance.user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():
        return
    if isinstance(instance, CallbackToken):
        # Deactivate every other active token of the same type for this user;
        # only the most recently saved token stays usable.
        CallbackToken.objects.active().filter(user=instance.user, type=instance.type).exclude(id=instance.id).update(is_active=False)


@receiver(signals.pre_save, sender=CallbackToken)
def check_unique_tokens(sender, instance, **kwargs):
    """
    Ensures that mobile and email tokens are unique or tries once more to generate.
    Note that here we've decided keys are unique even across auth and validation.
    We could consider relaxing this in the future as well by filtering on the instance.type.
    """
    if instance._state.adding:
        # save is called on a token to create it in the db
        # before creating check whether a token with the same key exists
        if isinstance(instance, CallbackToken):
            unique = False
            tries = 0

            if CallbackToken.objects.filter(key=instance.key, is_active=True).exists():
                # Try N(default=3) times before giving up.
                while tries < api_settings.PASSWORDLESS_TOKEN_GENERATION_ATTEMPTS:
                    tries = tries + 1
                    new_key = generate_numeric_token()
                    instance.key = new_key

                    if not CallbackToken.objects.filter(key=instance.key, is_active=True).exists():
                        # Leave the loop if we found a valid token that doesn't exist yet.
                        unique = True
                        break

                if not unique:
                    # A unique value wasn't found after three tries
                    raise ValidationError("Couldn't create a unique token even after retrying.")
            else:
                # A unique value was found immediately.
                pass
    else:
        # save is called on an already existing token to update it. Such as invalidating it.
        # in that case there is no need to check for the key. This way we both avoid an unneccessary db hit
        # and avoid to change key field of used tokens.
        pass


User = get_user_model()


@receiver(signals.pre_save, sender=User)
def update_alias_verification(sender, instance, **kwargs):
    """
    Flags a user's email as unverified if they change it.
    Optionally sends a verification token to the new endpoint.
    """
    if isinstance(instance, User):

        if instance.id:

            if api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED is True:
                """
                For marking email aliases as not verified when a user changes it.
                """
                email_field = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
                email_verified_field = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME

                # Verify that this is an existing instance and not a new one.
                try:
                    user_old = User.objects.get(id=instance.id)  # Pre-save object
                    instance_email = getattr(instance, email_field)  # Incoming Email
                    old_email = getattr(user_old, email_field)  # Pre-save object email

                    if instance_email != old_email and instance_email != "" and instance_email is not None:
                        # Email changed, verification should be flagged
                        setattr(instance, email_verified_field, False)
                        if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True:
                            email_subject = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT
                            email_plaintext = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE
                            email_html = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME
                            message_payload = {'email_subject': email_subject,
                                               'email_plaintext': email_plaintext,
                                               'email_html': email_html}
                            success = TokenService.send_token(instance, 'email', CallbackToken.TOKEN_TYPE_VERIFY, **message_payload)

                            if success:
                                logger.info('drfpasswordless: Successfully sent email on updated address: %s' % instance_email)
                            else:
                                logger.info('drfpasswordless: Failed to send email to updated address: %s' % instance_email)

                except User.DoesNotExist:
                    # User probably is just initially being created
                    # NOTE(review): this branch `return`s (skipping the mobile
                    # check below) while the mobile branch merely `pass`es —
                    # harmless since a missing user fails both lookups, but
                    # the asymmetry looks unintentional; confirm.
                    return

            if api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED is True:
                """
                For marking mobile aliases as not verified when a user changes it.
                """
                mobile_field = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME
                mobile_verified_field = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME

                # Verify that this is an existing instance and not a new one.
                try:
                    user_old = User.objects.get(id=instance.id)  # Pre-save object
                    instance_mobile = getattr(instance, mobile_field)  # Incoming mobile
                    old_mobile = getattr(user_old, mobile_field)  # Pre-save object mobile

                    if instance_mobile != old_mobile and instance_mobile != "" and instance_mobile is not None:
                        # Mobile changed, verification should be flagged
                        setattr(instance, mobile_verified_field, False)
                        if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True:
                            mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE
                            message_payload = {'mobile_message': mobile_message}
                            success = TokenService.send_token(instance, 'mobile', CallbackToken.TOKEN_TYPE_VERIFY, **message_payload)

                            if success:
                                logger.info('drfpasswordless: Successfully sent SMS on updated mobile: %s' % instance_mobile)
                            else:
                                logger.info('drfpasswordless: Failed to send SMS to updated mobile: %s' % instance_mobile)
                except User.DoesNotExist:
                    # User probably is just initially being created
                    pass
/drfpasswordless/urls.py
from drfpasswordless.settings import api_settings from django.urls import path from drfpasswordless.views import ( ObtainEmailCallbackToken, ObtainMobileCallbackToken, ObtainAuthTokenFromCallbackToken, VerifyAliasFromCallbackToken, ObtainEmailVerificationCallbackToken, ObtainMobileVerificationCallbackToken, ) app_name = 'drfpasswordless' urlpatterns = [ path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'email/', ObtainEmailCallbackToken.as_view(), name='auth_email'), path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'mobile/', ObtainMobileCallbackToken.as_view(), name='auth_mobile'), path(api_settings.PASSWORDLESS_AUTH_PREFIX + 'token/', ObtainAuthTokenFromCallbackToken.as_view(), name='auth_token'), path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'email/', ObtainEmailVerificationCallbackToken.as_view(), name='verify_email'), path(api_settings.PASSWORDLESS_VERIFY_PREFIX + 'mobile/', ObtainMobileVerificationCallbackToken.as_view(), name='verify_mobile'), path(api_settings.PASSWORDLESS_VERIFY_PREFIX, VerifyAliasFromCallbackToken.as_view(), name='verify_token'), ]
/drfpasswordless/utils.py
import logging
import os

from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.template import loader
from django.utils import timezone
from rest_framework.authtoken.models import Token

from drfpasswordless.models import CallbackToken
from drfpasswordless.settings import api_settings

logger = logging.getLogger(__name__)

User = get_user_model()


def authenticate_by_token(callback_token):
    """Exchange an active AUTH callback token for its user.

    Marks the token as used (is_active=False). Returns the user on success,
    or None when the token is unknown, the user is missing, or permission
    is denied.
    """
    try:
        token = CallbackToken.objects.get(key=callback_token, is_active=True, type=CallbackToken.TOKEN_TYPE_AUTH)

        # Returning a user designates a successful authentication.
        token.user = User.objects.get(pk=token.user.pk)
        token.is_active = False  # Mark this token as used.
        token.save()

        return token.user

    except CallbackToken.DoesNotExist:
        logger.debug("drfpasswordless: Challenged with a callback token that doesn't exist.")
    except User.DoesNotExist:
        logger.debug("drfpasswordless: Authenticated user somehow doesn't exist.")
    except PermissionDenied:
        logger.debug("drfpasswordless: Permission denied while authenticating.")

    return None


def create_callback_token_for_user(user, alias_type, token_type):
    """Create (or, for demo users, reuse) a CallbackToken for the given alias.

    alias_type is 'email' or 'mobile'; token_type is an AUTH/VERIFY constant
    from CallbackToken. Demo users (PASSWORDLESS_DEMO_USERS) always get their
    configured static key.
    """
    token = None
    alias_type_u = alias_type.upper()
    to_alias_field = getattr(api_settings, f'PASSWORDLESS_USER_{alias_type_u}_FIELD_NAME')
    if user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():
        # Demo users reuse their existing token, or get one with the static key.
        token = CallbackToken.objects.filter(user=user).first()
        if token:
            return token
        else:
            return CallbackToken.objects.create(
                user=user,
                key=api_settings.PASSWORDLESS_DEMO_USERS[user.pk],
                to_alias_type=alias_type_u,
                to_alias=getattr(user, to_alias_field),
                type=token_type
            )

    token = CallbackToken.objects.create(user=user,
                                         to_alias_type=alias_type_u,
                                         to_alias=getattr(user, to_alias_field),
                                         type=token_type)

    if token is not None:
        return token

    return None


def validate_token_age(callback_token):
    """
    Returns True if a given token is within the age expiration limit.

    Expired tokens are deactivated as a side effect. Demo users' tokens
    never expire.
    """
    try:
        token = CallbackToken.objects.get(key=callback_token, is_active=True)
        seconds = (timezone.now() - token.created_at).total_seconds()
        token_expiry_time = api_settings.PASSWORDLESS_TOKEN_EXPIRE_TIME
        if token.user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():
            return True
        if seconds <= token_expiry_time:
            return True
        else:
            # Invalidate our token.
            token.is_active = False
            token.save()
            return False

    except CallbackToken.DoesNotExist:
        # No valid token.
        return False


def verify_user_alias(user, token):
    """
    Marks a user's contact point as verified depending on accepted token type.

    Returns False only for an unknown alias type; otherwise saves the user
    and returns True (even when the token's alias no longer matches the
    user's current alias, in which case no flag is set).
    """
    if token.to_alias_type == 'EMAIL':
        if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME):
            setattr(user, api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME, True)
    elif token.to_alias_type == 'MOBILE':
        if token.to_alias == getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME):
            setattr(user, api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME, True)
    else:
        return False
    user.save()
    return True


def inject_template_context(context):
    """
    Injects additional context into email template.
    """
    for processor in api_settings.PASSWORDLESS_CONTEXT_PROCESSORS:
        context.update(processor())
    return context


def send_email_with_callback_token(user, email_token, **kwargs):
    """
    Sends a Email to user.email.

    Passes silently without sending in test environment
    """
    try:
        if api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS:
            # Make sure we have a sending address before sending.

            # Get email subject and message
            email_subject = kwargs.get('email_subject',
                                       api_settings.PASSWORDLESS_EMAIL_SUBJECT)
            email_plaintext = kwargs.get('email_plaintext',
                                         api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE)
            email_html = kwargs.get('email_html',
                                    api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME)

            # Inject context if user specifies.
            context = inject_template_context({'callback_token': email_token.key, })
            html_message = loader.render_to_string(email_html, context,)
            send_mail(
                email_subject,
                email_plaintext % email_token.key,
                api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS,
                [getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)],
                fail_silently=False,
                html_message=html_message,)

        else:
            logger.debug("Failed to send token email. Missing PASSWORDLESS_EMAIL_NOREPLY_ADDRESS.")
            return False
        return True

    except Exception as e:
        logger.debug("Failed to send token email to user: %d."
                     "Possibly no email on user object. Email entered was %s" %
                     (user.id, getattr(user, api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME)))
        logger.debug(e)
        return False


def send_sms_with_callback_token(user, mobile_token, **kwargs):
    """
    Sends a SMS to user.mobile via Twilio.

    Passes silently without sending in test environment.
    """
    if api_settings.PASSWORDLESS_TEST_SUPPRESSION is True:
        # we assume success to prevent spamming SMS during testing.

        # even if you have suppression on– you must provide a number if you have mobile selected.
        if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER is None:
            return False

        return True

    base_string = kwargs.get('mobile_message', api_settings.PASSWORDLESS_MOBILE_MESSAGE)

    try:
        if api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER:
            # We need a sending number to send properly

            from twilio.rest import Client
            twilio_client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])

            to_number = getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)
            if to_number.__class__.__name__ == 'PhoneNumber':
                to_number = to_number.__str__()

            twilio_client.messages.create(
                body=base_string % mobile_token.key,
                to=to_number,
                from_=api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER
            )
            return True
        else:
            logger.debug("Failed to send token sms. Missing PASSWORDLESS_MOBILE_NOREPLY_NUMBER.")
            return False
    except ImportError:
        logger.debug("Couldn't import Twilio client. Is twilio installed?")
        return False
    except KeyError:
        logger.debug("Couldn't send SMS."
                     "Did you set your Twilio account tokens and specify a PASSWORDLESS_MOBILE_NOREPLY_NUMBER?")
        # BUG FIX: previously fell through and returned None; every other
        # failure path returns an explicit False.
        return False
    except Exception as e:
        logger.debug("Failed to send token SMS to user: {}. "
                     "Possibly no mobile number on user object or the twilio package isn't set up yet. "
                     "Number entered was {}".format(user.id, getattr(user, api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME)))
        logger.debug(e)
        return False


def create_authentication_token(user):
    """ Default way to create an authentication token"""
    return Token.objects.get_or_create(user=user)
/drfpasswordless/views.py
import logging

from django.utils.module_loading import import_string
from rest_framework import parsers, renderers, status
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.views import APIView

from drfpasswordless.models import CallbackToken
from drfpasswordless.settings import api_settings
from drfpasswordless.serializers import (
    EmailAuthSerializer,
    MobileAuthSerializer,
    CallbackTokenAuthSerializer,
    CallbackTokenVerificationSerializer,
    EmailVerificationSerializer,
    MobileVerificationSerializer,
)
from drfpasswordless.services import TokenService

logger = logging.getLogger(__name__)


class AbstractBaseObtainCallbackToken(APIView):
    """
    This returns a 6-digit callback token we can trade for a user's Auth Token.
    """
    success_response = "A login token has been sent to you."
    failure_response = "Unable to send you a login code. Try again later."

    message_payload = {}

    @property
    def serializer_class(self):
        # Our serializer depending on type
        raise NotImplementedError

    @property
    def alias_type(self):
        # Alias Type
        raise NotImplementedError

    @property
    def token_type(self):
        # Token Type
        raise NotImplementedError

    def post(self, request, *args, **kwargs):
        if self.alias_type.upper() not in api_settings.PASSWORDLESS_AUTH_TYPES:
            # Only allow auth types allowed in settings.
            return Response(status=status.HTTP_404_NOT_FOUND)

        serializer = self.serializer_class(data=request.data, context={'request': request})
        if serializer.is_valid(raise_exception=True):
            # Validate -
            user = serializer.validated_data['user']
            # Create and send callback token
            success = TokenService.send_token(user, self.alias_type, self.token_type, **self.message_payload)

            # Respond With Success Or Failure of Sent
            if success:
                status_code = status.HTTP_200_OK
                response_detail = self.success_response
            else:
                status_code = status.HTTP_400_BAD_REQUEST
                response_detail = self.failure_response
            return Response({'detail': response_detail}, status=status_code)
        else:
            # Unreachable in practice: raise_exception=True raises on invalid data.
            return Response(serializer.error_messages, status=status.HTTP_400_BAD_REQUEST)


class ObtainEmailCallbackToken(AbstractBaseObtainCallbackToken):
    """Sends a login callback token to the given email address."""
    permission_classes = (AllowAny,)
    serializer_class = EmailAuthSerializer
    success_response = "A login token has been sent to your email."
    failure_response = "Unable to email you a login code. Try again later."

    alias_type = 'email'
    token_type = CallbackToken.TOKEN_TYPE_AUTH

    email_subject = api_settings.PASSWORDLESS_EMAIL_SUBJECT
    email_plaintext = api_settings.PASSWORDLESS_EMAIL_PLAINTEXT_MESSAGE
    email_html = api_settings.PASSWORDLESS_EMAIL_TOKEN_HTML_TEMPLATE_NAME
    message_payload = {'email_subject': email_subject,
                       'email_plaintext': email_plaintext,
                       'email_html': email_html}


class ObtainMobileCallbackToken(AbstractBaseObtainCallbackToken):
    """Sends a login callback token to the given mobile number."""
    permission_classes = (AllowAny,)
    serializer_class = MobileAuthSerializer
    success_response = "We texted you a login code."
    failure_response = "Unable to send you a login code. Try again later."

    alias_type = 'mobile'
    token_type = CallbackToken.TOKEN_TYPE_AUTH

    mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE
    message_payload = {'mobile_message': mobile_message}


class ObtainEmailVerificationCallbackToken(AbstractBaseObtainCallbackToken):
    """Sends a verification callback token to an authenticated user's email."""
    permission_classes = (IsAuthenticated,)
    serializer_class = EmailVerificationSerializer
    success_response = "A verification token has been sent to your email."
    failure_response = "Unable to email you a verification code. Try again later."

    alias_type = 'email'
    token_type = CallbackToken.TOKEN_TYPE_VERIFY

    email_subject = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT
    email_plaintext = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE
    email_html = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME
    message_payload = {
        'email_subject': email_subject,
        'email_plaintext': email_plaintext,
        'email_html': email_html
    }


class ObtainMobileVerificationCallbackToken(AbstractBaseObtainCallbackToken):
    """Sends a verification callback token to an authenticated user's mobile."""
    permission_classes = (IsAuthenticated,)
    serializer_class = MobileVerificationSerializer
    success_response = "We texted you a verification code."
    failure_response = "Unable to send you a verification code. Try again later."

    alias_type = 'mobile'
    token_type = CallbackToken.TOKEN_TYPE_VERIFY

    mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE
    message_payload = {'mobile_message': mobile_message}


class AbstractBaseObtainAuthToken(APIView):
    """
    This is a duplicate of rest_framework's own ObtainAuthToken method.
    Instead, this returns an Auth Token based on our 6 digit callback token and source.
    """
    serializer_class = None

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid(raise_exception=True):
            user = serializer.validated_data['user']
            # Token creator is configurable (PASSWORDLESS_AUTH_TOKEN_CREATOR)
            # and must return a (token, created) pair like get_or_create.
            token_creator = import_string(api_settings.PASSWORDLESS_AUTH_TOKEN_CREATOR)
            (token, _) = token_creator(user)

            if token:
                TokenSerializer = import_string(api_settings.PASSWORDLESS_AUTH_TOKEN_SERIALIZER)
                token_serializer = TokenSerializer(data=token.__dict__, partial=True)
                if token_serializer.is_valid():
                    # Return our key for consumption.
                    return Response(token_serializer.data, status=status.HTTP_200_OK)
        else:
            logger.error("Couldn't log in unknown user. Errors on serializer: {}".format(serializer.error_messages))
        return Response({'detail': 'Couldn\'t log you in. Try again later.'}, status=status.HTTP_400_BAD_REQUEST)


class ObtainAuthTokenFromCallbackToken(AbstractBaseObtainAuthToken):
    """
    This is a duplicate of rest_framework's own ObtainAuthToken method.
    Instead, this returns an Auth Token based on our callback token and source.
    """
    permission_classes = (AllowAny,)
    serializer_class = CallbackTokenAuthSerializer


class VerifyAliasFromCallbackToken(APIView):
    """
    This verifies an alias on correct callback token entry using the same logic as auth.
    Should be refactored at some point.
    """
    # BUG FIX: this endpoint reads self.request.user.id, so it must require an
    # authenticated caller; previously any anonymous request reached post().
    permission_classes = (IsAuthenticated,)
    serializer_class = CallbackTokenVerificationSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.serializer_class(data=request.data, context={'user_id': self.request.user.id})
        if serializer.is_valid(raise_exception=True):
            return Response({'detail': 'Alias verified.'}, status=status.HTTP_200_OK)
        else:
            # Unreachable in practice: raise_exception=True raises on invalid data.
            logger.error("Couldn't verify unknown user. Errors on serializer: {}".format(serializer.error_messages))
        return Response({'detail': 'We couldn\'t verify this alias. Try again later.'}, status.HTTP_400_BAD_REQUEST)
/tests/models.py
from django.contrib.auth.models import AbstractBaseUser from django.contrib.auth.models import BaseUserManager from django.core.validators import RegexValidator from django.db import models phone_regex = RegexValidator(regex=r'^\+[1-9]\d{1,14}$', message="Mobile number must be entered in the format:" " '+999999999'. Up to 15 digits allowed.") class CustomUser(AbstractBaseUser): email = models.EmailField(max_length=255, unique=True, blank=True, null=True) email_verified = models.BooleanField(default=False) mobile = models.CharField(validators=[phone_regex], max_length=17, unique=True, blank=True, null=True) mobile_verified = models.BooleanField(default=False) objects = BaseUserManager() USERNAME_FIELD = 'email' class Meta: app_label = 'tests'
/tests/test_authentication.py
"""End-to-end tests for the passwordless auth flow: request a callback token
by email/mobile, then exchange it at the challenge endpoint for a DRF token."""
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase

from django.contrib.auth import get_user_model
from django.urls import reverse

from drfpasswordless.settings import api_settings, DEFAULTS
from drfpasswordless.utils import CallbackToken

User = get_user_model()


class EmailSignUpCallbackTokenTests(APITestCase):

    def setUp(self):
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'

        self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
        self.url = reverse('drfpasswordless:auth_email')

    def test_email_signup_failed(self):
        email = 'failedemail182+'
        data = {'email': email}

        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_email_signup_success(self):
        email = 'aaron@example.com'
        data = {'email': email}

        # Verify user doesn't exist yet
        user = User.objects.filter(**{self.email_field_name: 'aaron@example.com'}).first()
        # Make sure our user isn't None, meaning the user was created.
        self.assertEqual(user, None)

        # verify a new user was created with serializer
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(**{self.email_field_name: 'aaron@example.com'})
        self.assertNotEqual(user, None)

        # Verify a token exists for the user
        self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 1)

    def test_email_signup_disabled(self):
        api_settings.PASSWORDLESS_REGISTER_NEW_USERS = False

        # Verify user doesn't exist yet
        user = User.objects.filter(**{self.email_field_name: 'aaron@example.com'}).first()
        # Make sure our user isn't None, meaning the user was created.
        self.assertEqual(user, None)

        email = 'aaron@example.com'
        data = {'email': email}

        # verify a new user was not created
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        user = User.objects.filter(**{self.email_field_name: 'aaron@example.com'}).first()
        self.assertEqual(user, None)

        # Verify no token was created for the user
        self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 0)

    def tearDown(self):
        # Restore globally-mutated settings so other test classes are unaffected.
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']
        api_settings.PASSWORDLESS_REGISTER_NEW_USERS = DEFAULTS['PASSWORDLESS_REGISTER_NEW_USERS']


class EmailLoginCallbackTokenTests(APITestCase):

    def setUp(self):
        api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'

        self.email = 'aaron@example.com'
        self.url = reverse('drfpasswordless:auth_email')
        self.challenge_url = reverse('drfpasswordless:auth_token')

        self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
        self.user = User.objects.create(**{self.email_field_name: self.email})

    def test_email_auth_failed(self):
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        challenge_data = {'email': self.email, 'token': '123456'}  # Send an arbitrary token instead

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_email_auth_missing_alias(self):
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        challenge_data = {'token': callback_token}  # Missing Alias

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_email_auth_bad_alias(self):
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        challenge_data = {'email': 'abcde@example.com', 'token': callback_token}  # Bad Alias

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_email_auth_expired(self):
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        challenge_data = {'email': self.email, 'token': callback_token}

        # Requesting a second token invalidates the first (post_save signal).
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Second token sent to alias
        second_callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        second_challenge_data = {'email': self.email, 'token': second_callback_token}

        # Try to auth with the old callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)

        # Try to auth with the new callback token
        second_challenge_response = self.client.post(self.challenge_url, second_challenge_data)
        self.assertEqual(second_challenge_response.status_code, status.HTTP_200_OK)

        # Verify Auth Token
        auth_token = second_challenge_response.data['token']
        self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)

    def test_email_auth_success(self):
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        challenge_data = {'email': self.email, 'token': callback_token}

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_200_OK)

        # Verify Auth Token
        auth_token = challenge_response.data['token']
        self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)

    def tearDown(self):
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']
        self.user.delete()


"""
Mobile Tests
"""


class MobileSignUpCallbackTokenTests(APITestCase):

    def setUp(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = True
        api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'
        self.url = reverse('drfpasswordless:auth_mobile')

        self.mobile_field_name = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME

    def test_mobile_signup_failed(self):
        mobile = 'sidfj98zfd'
        data = {'mobile': mobile}

        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_mobile_signup_success(self):
        mobile = '+15551234567'
        data = {'mobile': mobile}

        # Verify user doesn't exist yet
        user = User.objects.filter(**{self.mobile_field_name: '+15551234567'}).first()
        # Make sure our user isn't None, meaning the user was created.
        self.assertEqual(user, None)

        # verify a new user was created with serializer
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(**{self.mobile_field_name: '+15551234567'})
        self.assertNotEqual(user, None)

        # Verify a token exists for the user
        self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 1)

    def test_mobile_signup_disabled(self):
        api_settings.PASSWORDLESS_REGISTER_NEW_USERS = False

        # Verify user doesn't exist yet
        user = User.objects.filter(**{self.mobile_field_name: '+15557654321'}).first()
        # Make sure our user isn't None, meaning the user was created.
        self.assertEqual(user, None)

        mobile = '+15557654321'
        data = {'mobile': mobile}

        # verify a new user was not created
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        user = User.objects.filter(**{self.mobile_field_name: '+15557654321'}).first()
        self.assertEqual(user, None)

        # Verify no token was created for the user
        self.assertEqual(CallbackToken.objects.filter(user=user, is_active=True).exists(), 0)

    def tearDown(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_REGISTER_NEW_USERS = DEFAULTS['PASSWORDLESS_REGISTER_NEW_USERS']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']


def dummy_token_creator(user):
    # Replacement PASSWORDLESS_AUTH_TOKEN_CREATOR used by the override test;
    # mirrors get_or_create's (token, created) return shape.
    token = Token.objects.create(key="dummy", user=user)
    return (token, True)


class OverrideTokenCreationTests(APITestCase):

    def setUp(self):
        super().setUp()

        api_settings.PASSWORDLESS_AUTH_TOKEN_CREATOR = 'tests.test_authentication.dummy_token_creator'
        api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'

        self.email = 'aaron@example.com'
        self.url = reverse('drfpasswordless:auth_email')
        self.challenge_url = reverse('drfpasswordless:auth_token')

        self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
        self.user = User.objects.create(**{self.email_field_name: self.email})

    def test_token_creation_gets_overridden(self):
        """Ensure that if we change the token creation function, the overridden one gets called"""
        data = {'email': self.email}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        challenge_data = {'email': self.email, 'token': callback_token}

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_200_OK)

        # Verify Auth Token
        auth_token = challenge_response.data['token']
        self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)
        self.assertEqual('dummy', Token.objects.filter(key=auth_token).first().key)

    def tearDown(self):
        api_settings.PASSWORDLESS_AUTH_TOKEN_CREATOR = DEFAULTS['PASSWORDLESS_AUTH_TOKEN_CREATOR']
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']
        self.user.delete()
        super().tearDown()


class MobileLoginCallbackTokenTests(APITestCase):

    def setUp(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = True
        api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'

        self.mobile = '+15551234567'
        self.url = reverse('drfpasswordless:auth_mobile')
        self.challenge_url = reverse('drfpasswordless:auth_token')

        self.mobile_field_name = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME
        self.user = User.objects.create(**{self.mobile_field_name: self.mobile})

    def test_mobile_auth_failed(self):
        data = {'mobile': self.mobile}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        challenge_data = {'mobile': self.mobile, 'token': '123456'}  # Send an arbitrary token instead

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_mobile_auth_expired(self):
        data = {'mobile': self.mobile}
        first_response = self.client.post(self.url, data)
        self.assertEqual(first_response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        first_callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        first_challenge_data = {'mobile': self.mobile, 'token': first_callback_token}

        data = {'mobile': self.mobile}
        second_response = self.client.post(self.url, data)
        self.assertEqual(second_response.status_code, status.HTTP_200_OK)

        # Second token sent to alias
        second_callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        second_challenge_data = {'mobile': self.mobile, 'token': second_callback_token}

        # Try to auth with the old callback token
        challenge_response = self.client.post(self.challenge_url, first_challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_400_BAD_REQUEST)

        # Try to auth with the new callback token
        second_challenge_response = self.client.post(self.challenge_url, second_challenge_data)
        self.assertEqual(second_challenge_response.status_code, status.HTTP_200_OK)

        # Verify Auth Token
        auth_token = second_challenge_response.data['token']
        self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)

    def test_mobile_auth_success(self):
        data = {'mobile': self.mobile}
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        # Token sent to alias
        callback_token = CallbackToken.objects.filter(user=self.user, is_active=True).first()
        challenge_data = {'mobile': self.mobile, 'token': callback_token}

        # Try to auth with the callback token
        challenge_response = self.client.post(self.challenge_url, challenge_data)
        self.assertEqual(challenge_response.status_code, status.HTTP_200_OK)

        # Verify Auth Token
        auth_token = challenge_response.data['token']
        self.assertEqual(auth_token, Token.objects.filter(key=auth_token).first().key)

    def tearDown(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']
        self.user.delete()
/tests/test_verification.py
"""Tests for alias (email/mobile) verification flows in drfpasswordless.

Reconstructed from line-collapsed source. Fixes applied:
- AliasEmailVerificationTests.tearDown restored PASSWORDLESS_USER_MARK_EMAIL_VERIFIED
  from the wrong DEFAULTS key ('PASSWORDLESS_USER_MARK_MOBILE_VERIFIED').
- AliasMobileVerificationTests.tearDown assigned a non-existent setting name
  (PASSWORDLESS_MOBILE_NOREPLY_ADDRESS) instead of PASSWORDLESS_MOBILE_NOREPLY_NUMBER.
- Hard-coded '+15557654321' replaced with the `mobile2` variable it duplicated.
- Removed unused `gettext_lazy` import.
"""
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model
from django.urls import reverse

from drfpasswordless.settings import api_settings, DEFAULTS
from drfpasswordless.utils import CallbackToken

User = get_user_model()


class AliasEmailVerificationTests(APITestCase):

    def setUp(self):
        api_settings.PASSWORDLESS_AUTH_TYPES = ['EMAIL']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = 'noreply@example.com'
        api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED = True

        self.url = reverse('drfpasswordless:auth_email')
        self.callback_url = reverse('drfpasswordless:auth_token')
        self.verify_url = reverse('drfpasswordless:verify_email')
        self.callback_verify = reverse('drfpasswordless:verify_token')

        self.email_field_name = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
        self.email_verified_field_name = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME

    def test_email_unverified_to_verified_and_back(self):
        """Signing in verifies the email; changing the email resets the flag until re-verified."""
        email = 'aaron@example.com'
        email2 = 'aaron2@example.com'
        data = {'email': email}

        # Create a new user.
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(**{self.email_field_name: email})
        self.assertNotEqual(user, None)
        self.assertEqual(getattr(user, self.email_verified_field_name), False)

        # Verify a token exists for the user, sign in and check verified again.
        callback = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_AUTH, is_active=True).first()
        callback_data = {'email': email, 'token': callback}
        callback_response = self.client.post(self.callback_url, callback_data)
        self.assertEqual(callback_response.status_code, status.HTTP_200_OK)

        # Verify we got the token, then check that the email is now verified.
        token = callback_response.data['token']
        self.assertEqual(token, Token.objects.get(user=user).key)
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.email_verified_field_name), True)

        # Changing the email should flip the verified flag back to False.
        setattr(user, self.email_field_name, email2)
        user.save()
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.email_verified_field_name), False)

        # Request a verification token for the new address.
        self.client.force_authenticate(user)
        verify_response = self.client.post(self.verify_url)
        self.assertEqual(verify_response.status_code, status.HTTP_200_OK)

        # Refresh user: still unverified until the callback token is posted back.
        user = User.objects.get(**{self.email_field_name: email2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.email_field_name), None)
        self.assertEqual(getattr(user, self.email_verified_field_name), False)

        # Post the verification callback token back.
        verify_token = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_VERIFY, is_active=True).first()
        self.assertNotEqual(verify_token, None)
        verify_callback_response = self.client.post(self.callback_verify, {'email': email2, 'token': verify_token.key})
        self.assertEqual(verify_callback_response.status_code, status.HTTP_200_OK)

        # Refresh user: the new address is now verified.
        user = User.objects.get(**{self.email_field_name: email2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.email_field_name), None)
        self.assertEqual(getattr(user, self.email_verified_field_name), True)

    def tearDown(self):
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        api_settings.PASSWORDLESS_EMAIL_NOREPLY_ADDRESS = DEFAULTS['PASSWORDLESS_EMAIL_NOREPLY_ADDRESS']
        # Fixed: original restored this from DEFAULTS['PASSWORDLESS_USER_MARK_MOBILE_VERIFIED'].
        api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED = DEFAULTS['PASSWORDLESS_USER_MARK_EMAIL_VERIFIED']


class AliasMobileVerificationTests(APITestCase):

    def setUp(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = True
        api_settings.PASSWORDLESS_AUTH_TYPES = ['MOBILE']
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = '+15550000000'
        api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED = True

        self.url = reverse('drfpasswordless:auth_mobile')
        self.callback_url = reverse('drfpasswordless:auth_token')
        self.verify_url = reverse('drfpasswordless:verify_mobile')
        self.callback_verify = reverse('drfpasswordless:verify_token')

        self.mobile_field_name = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME
        self.mobile_verified_field_name = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME

    def test_mobile_unverified_to_verified_and_back(self):
        """Signing in verifies the mobile; changing the number resets the flag until re-verified."""
        mobile = '+15551234567'
        mobile2 = '+15557654321'
        data = {'mobile': mobile}

        # Create a new user.
        response = self.client.post(self.url, data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        user = User.objects.get(**{self.mobile_field_name: mobile})
        self.assertNotEqual(user, None)
        self.assertEqual(getattr(user, self.mobile_verified_field_name), False)

        # Verify a token exists for the user, sign in and check verified again.
        callback = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_AUTH, is_active=True).first()
        callback_data = {'mobile': mobile, 'token': callback}
        callback_response = self.client.post(self.callback_url, callback_data)
        self.assertEqual(callback_response.status_code, status.HTTP_200_OK)

        # Verify we got the token, then check that the mobile is now verified.
        token = callback_response.data['token']
        self.assertEqual(token, Token.objects.get(user=user).key)
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.mobile_verified_field_name), True)

        # Changing the mobile should flip the verified flag back to False.
        # Fixed: original hard-coded '+15557654321' here instead of using mobile2.
        setattr(user, self.mobile_field_name, mobile2)
        user.save()
        user.refresh_from_db()
        self.assertEqual(getattr(user, self.mobile_verified_field_name), False)

        # Request a verification token for the new number.
        self.client.force_authenticate(user)
        verify_response = self.client.post(self.verify_url)
        self.assertEqual(verify_response.status_code, status.HTTP_200_OK)

        # Refresh user: still unverified until the callback token is posted back.
        user = User.objects.get(**{self.mobile_field_name: mobile2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.mobile_field_name), None)
        self.assertEqual(getattr(user, self.mobile_verified_field_name), False)

        # Post the verification callback token back.
        verify_token = CallbackToken.objects.filter(user=user, type=CallbackToken.TOKEN_TYPE_VERIFY, is_active=True).first()
        self.assertNotEqual(verify_token, None)
        verify_callback_response = self.client.post(self.callback_verify, {'mobile': mobile2, 'token': verify_token.key})
        self.assertEqual(verify_callback_response.status_code, status.HTTP_200_OK)

        # Refresh user: the new number is now verified.
        user = User.objects.get(**{self.mobile_field_name: mobile2})
        self.assertNotEqual(user, None)
        self.assertNotEqual(getattr(user, self.mobile_field_name), None)
        self.assertEqual(getattr(user, self.mobile_verified_field_name), True)

    def tearDown(self):
        api_settings.PASSWORDLESS_TEST_SUPPRESSION = DEFAULTS['PASSWORDLESS_TEST_SUPPRESSION']
        api_settings.PASSWORDLESS_AUTH_TYPES = DEFAULTS['PASSWORDLESS_AUTH_TYPES']
        # Fixed: original assigned the non-existent PASSWORDLESS_MOBILE_NOREPLY_ADDRESS.
        api_settings.PASSWORDLESS_MOBILE_NOREPLY_NUMBER = DEFAULTS['PASSWORDLESS_MOBILE_NOREPLY_NUMBER']
        api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED = DEFAULTS['PASSWORDLESS_USER_MARK_MOBILE_VERIFIED']
/tests/urls.py
from django.urls import path, include from rest_framework.urlpatterns import format_suffix_patterns from drfpasswordless.settings import api_settings from drfpasswordless.views import (ObtainEmailCallbackToken, ObtainMobileCallbackToken, ObtainAuthTokenFromCallbackToken, VerifyAliasFromCallbackToken, ObtainEmailVerificationCallbackToken, ObtainMobileVerificationCallbackToken, ) app_name = 'drfpasswordless' urlpatterns = [ path('', include('drfpasswordless.urls')), ] format_suffix_patterns(urlpatterns)
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null
null